Lines Matching +full:layer +full:- +full:buffer +full:- +full:offset

53 #include <linux/dma-mapping.h>
68 #define NTB_TRANSPORT_DESC "Software Queue-Pair Transport over NTB"
104 /* Only two-port NTB devices are supported */
142 u8 qp_num; /* Only 64 QPs are allowed. 0-63 */
272 #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count)
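
As a quick illustration of QP_TO_MW() above: queue pairs are striped round-robin across the available memory windows, e.g.:

	/* With nt->mw_count == 2:
	 *   QP_TO_MW(nt, 0) == 0, QP_TO_MW(nt, 1) == 1,
	 *   QP_TO_MW(nt, 2) == 0, QP_TO_MW(nt, 3) == 1, ...
	 */
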
281 static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset);
282 static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset);
283 static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset);
289 return !strncmp(dev_name(dev), drv->name, strlen(drv->name)); in ntb_transport_bus_match()
299 client = drv_client(dev->driver); in ntb_transport_bus_probe()
300 rc = client->probe(dev); in ntb_transport_bus_probe()
311 client = drv_client(dev->driver); in ntb_transport_bus_remove()
312 client->remove(dev); in ntb_transport_bus_remove()
328 list_add_tail(&nt->entry, &ntb_transport_list); in ntb_bus_init()
336 list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) { in ntb_bus_remove()
337 dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n", in ntb_bus_remove()
338 dev_name(&client_dev->dev)); in ntb_bus_remove()
339 list_del(&client_dev->entry); in ntb_bus_remove()
340 device_unregister(&client_dev->dev); in ntb_bus_remove()
343 list_del(&nt->entry); in ntb_bus_remove()
355 * ntb_transport_unregister_client_dev - Unregister NTB client device
358 * Unregister an NTB client device with the NTB transport layer
366 list_for_each_entry_safe(client, cd, &nt->client_devs, entry) in ntb_transport_unregister_client_dev()
367 if (!strncmp(dev_name(&client->dev), device_name, in ntb_transport_unregister_client_dev()
369 list_del(&client->entry); in ntb_transport_unregister_client_dev()
370 device_unregister(&client->dev); in ntb_transport_unregister_client_dev()
376 * ntb_transport_register_client_dev - Register NTB client device
379 * Register an NTB client device with the NTB transport layer
381 * Returns: %0 on success or -errno code on error
391 return -ENODEV; in ntb_transport_register_client_dev()
396 node = dev_to_node(&nt->ndev->dev); in ntb_transport_register_client_dev()
401 rc = -ENOMEM; in ntb_transport_register_client_dev()
405 dev = &client_dev->dev; in ntb_transport_register_client_dev()
409 dev->bus = &ntb_transport_bus; in ntb_transport_register_client_dev()
410 dev->release = ntb_transport_client_release; in ntb_transport_register_client_dev()
411 dev->parent = &nt->ndev->dev; in ntb_transport_register_client_dev()
419 list_add_tail(&client_dev->entry, &nt->client_devs); in ntb_transport_register_client_dev()
433 * ntb_transport_register_client - Register NTB client driver
436 * Register an NTB client driver with the NTB transport layer
438 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
442 drv->driver.bus = &ntb_transport_bus; in ntb_transport_register_client()
445 return -ENODEV; in ntb_transport_register_client()
447 return driver_register(&drv->driver); in ntb_transport_register_client()
452 * ntb_transport_unregister_client - Unregister NTB client driver
455 * Unregister an NTB client driver with the NTB transport layer
457 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
461 driver_unregister(&drv->driver); in ntb_transport_unregister_client()
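
The register/unregister pair above is what an NTB client driver calls at module load and unload. Below is a minimal sketch of that pattern, modeled on how in-tree clients such as ntb_netdev wire themselves up; the "my_ntb_client" name and its probe/remove callbacks are hypothetical, and the handler bodies are omitted.

	#include <linux/module.h>
	#include <linux/ntb_transport.h>

	static int my_ntb_client_probe(struct device *client_dev);
	static void my_ntb_client_remove(struct device *client_dev);

	static struct ntb_transport_client my_ntb_client = {
		.driver.name	= "my_ntb_client",
		.driver.owner	= THIS_MODULE,
		.probe		= my_ntb_client_probe,
		.remove		= my_ntb_client_remove,
	};

	static int __init my_ntb_client_init(void)
	{
		int rc;

		/* Create the client device the transport bus matches by name prefix. */
		rc = ntb_transport_register_client_dev("my_ntb_client");
		if (rc)
			return rc;

		rc = ntb_transport_register_client(&my_ntb_client);
		if (rc)
			ntb_transport_unregister_client_dev("my_ntb_client");

		return rc;
	}
	module_init(my_ntb_client_init);

	static void __exit my_ntb_client_exit(void)
	{
		ntb_transport_unregister_client(&my_ntb_client);
		ntb_transport_unregister_client_dev("my_ntb_client");
	}
	module_exit(my_ntb_client_exit);
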
472 qp = filp->private_data; in debugfs_read()
474 if (!qp || !qp->link_is_up) in debugfs_read()
481 return -ENOMEM; in debugfs_read()
484 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
486 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
487 "rx_bytes - \t%llu\n", qp->rx_bytes); in debugfs_read()
488 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
489 "rx_pkts - \t%llu\n", qp->rx_pkts); in debugfs_read()
490 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
491 "rx_memcpy - \t%llu\n", qp->rx_memcpy); in debugfs_read()
492 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
493 "rx_async - \t%llu\n", qp->rx_async); in debugfs_read()
494 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
495 "rx_ring_empty - %llu\n", qp->rx_ring_empty); in debugfs_read()
496 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
497 "rx_err_no_buf - %llu\n", qp->rx_err_no_buf); in debugfs_read()
498 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
499 "rx_err_oflow - \t%llu\n", qp->rx_err_oflow); in debugfs_read()
500 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
501 "rx_err_ver - \t%llu\n", qp->rx_err_ver); in debugfs_read()
502 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
503 "rx_buff - \t0x%p\n", qp->rx_buff); in debugfs_read()
504 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
505 "rx_index - \t%u\n", qp->rx_index); in debugfs_read()
506 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
507 "rx_max_entry - \t%u\n", qp->rx_max_entry); in debugfs_read()
508 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
509 "rx_alloc_entry - \t%u\n\n", qp->rx_alloc_entry); in debugfs_read()
511 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
512 "tx_bytes - \t%llu\n", qp->tx_bytes); in debugfs_read()
513 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
514 "tx_pkts - \t%llu\n", qp->tx_pkts); in debugfs_read()
515 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
516 "tx_memcpy - \t%llu\n", qp->tx_memcpy); in debugfs_read()
517 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
518 "tx_async - \t%llu\n", qp->tx_async); in debugfs_read()
519 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
520 "tx_ring_full - \t%llu\n", qp->tx_ring_full); in debugfs_read()
521 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
522 "tx_err_no_buf - %llu\n", qp->tx_err_no_buf); in debugfs_read()
523 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
524 "tx_mw - \t0x%p\n", qp->tx_mw); in debugfs_read()
525 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
526 "tx_index (H) - \t%u\n", qp->tx_index); in debugfs_read()
527 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
528 "RRI (T) - \t%u\n", in debugfs_read()
529 qp->remote_rx_info->entry); in debugfs_read()
530 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
531 "tx_max_entry - \t%u\n", qp->tx_max_entry); in debugfs_read()
532 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
533 "free tx - \t%u\n", in debugfs_read()
536 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
538 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
539 "Using TX DMA - \t%s\n", in debugfs_read()
540 qp->tx_dma_chan ? "Yes" : "No"); in debugfs_read()
541 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
542 "Using RX DMA - \t%s\n", in debugfs_read()
543 qp->rx_dma_chan ? "Yes" : "No"); in debugfs_read()
544 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
545 "QP Link - \t%s\n", in debugfs_read()
546 qp->link_is_up ? "Up" : "Down"); in debugfs_read()
547 out_offset += scnprintf(buf + out_offset, out_count - out_offset, in debugfs_read()
586 list_del(&entry->entry); in ntb_list_rm()
607 list_move_tail(&entry->entry, to_list); in ntb_list_mv()
618 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num]; in ntb_transport_setup_qp_mw()
620 struct ntb_dev *ndev = nt->ndev; in ntb_transport_setup_qp_mw()
627 mw_count = nt->mw_count; in ntb_transport_setup_qp_mw()
628 qp_count = nt->qp_count; in ntb_transport_setup_qp_mw()
631 mw = &nt->mw_vec[mw_num]; in ntb_transport_setup_qp_mw()
633 if (!mw->virt_addr) in ntb_transport_setup_qp_mw()
634 return -ENOMEM; in ntb_transport_setup_qp_mw()
641 rx_size = (unsigned int)mw->xlat_size / num_qps_mw; in ntb_transport_setup_qp_mw()
642 qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count); in ntb_transport_setup_qp_mw()
643 rx_size -= sizeof(struct ntb_rx_info); in ntb_transport_setup_qp_mw()
645 qp->remote_rx_info = qp->rx_buff + rx_size; in ntb_transport_setup_qp_mw()
648 qp->rx_max_frame = min(transport_mtu, rx_size / 2); in ntb_transport_setup_qp_mw()
649 qp->rx_max_entry = rx_size / qp->rx_max_frame; in ntb_transport_setup_qp_mw()
650 qp->rx_index = 0; in ntb_transport_setup_qp_mw()
657 node = dev_to_node(&ndev->dev); in ntb_transport_setup_qp_mw()
658 for (i = qp->rx_alloc_entry; i < qp->rx_max_entry; i++) { in ntb_transport_setup_qp_mw()
661 return -ENOMEM; in ntb_transport_setup_qp_mw()
663 entry->qp = qp; in ntb_transport_setup_qp_mw()
664 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, in ntb_transport_setup_qp_mw()
665 &qp->rx_free_q); in ntb_transport_setup_qp_mw()
666 qp->rx_alloc_entry++; in ntb_transport_setup_qp_mw()
669 qp->remote_rx_info->entry = qp->rx_max_entry - 1; in ntb_transport_setup_qp_mw()
672 for (i = 0; i < qp->rx_max_entry; i++) { in ntb_transport_setup_qp_mw()
673 void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) - in ntb_transport_setup_qp_mw() local
675 memset(offset, 0, sizeof(struct ntb_payload_header)); in ntb_transport_setup_qp_mw()
678 qp->rx_pkts = 0; in ntb_transport_setup_qp_mw()
679 qp->tx_pkts = 0; in ntb_transport_setup_qp_mw()
680 qp->tx_index = 0; in ntb_transport_setup_qp_mw()
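
The ntb_transport_setup_qp_mw() fragment above carves each queue pair's slice of a memory window into a receive ring: the tail of the slice holds the ntb_rx_info word the peer updates, and the rest is split into rx_max_entry frames of rx_max_frame bytes, each ending in an ntb_payload_header. A worked trace of the arithmetic with illustrative numbers (not the driver's actual defaults):

	/*
	 * Illustrative example only:
	 *   MW slice per QP (mw->xlat_size / num_qps_mw) = 65536 bytes
	 *   rx_size       = 65536 - sizeof(struct ntb_rx_info) = 65532
	 *   rx_max_frame  = min(transport_mtu = 16384, 65532 / 2) = 16384
	 *   rx_max_entry  = 65532 / 16384 = 3 frames
	 *   remote_rx_info sits at rx_buff + 65532 (the tail of the slice)
	 */
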
689 tasklet_schedule(&qp->rxc_db_work); in ntb_transport_isr()
697 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num]; in ntb_transport_setup_qp_peer_msi()
698 int spad = qp_num * 2 + nt->msi_spad_offset; in ntb_transport_setup_qp_peer_msi()
700 if (!nt->use_msi) in ntb_transport_setup_qp_peer_msi()
703 if (spad >= ntb_spad_count(nt->ndev)) in ntb_transport_setup_qp_peer_msi()
706 qp->peer_msi_desc.addr_offset = in ntb_transport_setup_qp_peer_msi()
707 ntb_peer_spad_read(qp->ndev, PIDX, spad); in ntb_transport_setup_qp_peer_msi()
708 qp->peer_msi_desc.data = in ntb_transport_setup_qp_peer_msi()
709 ntb_peer_spad_read(qp->ndev, PIDX, spad + 1); in ntb_transport_setup_qp_peer_msi()
711 dev_dbg(&qp->ndev->pdev->dev, "QP%d Peer MSI addr=%x data=%x\n", in ntb_transport_setup_qp_peer_msi()
712 qp_num, qp->peer_msi_desc.addr_offset, qp->peer_msi_desc.data); in ntb_transport_setup_qp_peer_msi()
714 if (qp->peer_msi_desc.addr_offset) { in ntb_transport_setup_qp_peer_msi()
715 qp->use_msi = true; in ntb_transport_setup_qp_peer_msi()
716 dev_info(&qp->ndev->pdev->dev, in ntb_transport_setup_qp_peer_msi()
724 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num]; in ntb_transport_setup_qp_msi()
725 int spad = qp_num * 2 + nt->msi_spad_offset; in ntb_transport_setup_qp_msi()
728 if (!nt->use_msi) in ntb_transport_setup_qp_msi()
731 if (spad >= ntb_spad_count(nt->ndev)) { in ntb_transport_setup_qp_msi()
732 dev_warn_once(&qp->ndev->pdev->dev, in ntb_transport_setup_qp_msi()
737 ntb_spad_write(qp->ndev, spad, 0); in ntb_transport_setup_qp_msi()
738 ntb_spad_write(qp->ndev, spad + 1, 0); in ntb_transport_setup_qp_msi()
740 if (!qp->msi_irq) { in ntb_transport_setup_qp_msi()
741 qp->msi_irq = ntbm_msi_request_irq(qp->ndev, ntb_transport_isr, in ntb_transport_setup_qp_msi()
743 &qp->msi_desc); in ntb_transport_setup_qp_msi()
744 if (qp->msi_irq < 0) { in ntb_transport_setup_qp_msi()
745 dev_warn(&qp->ndev->pdev->dev, in ntb_transport_setup_qp_msi()
752 rc = ntb_spad_write(qp->ndev, spad, qp->msi_desc.addr_offset); in ntb_transport_setup_qp_msi()
756 rc = ntb_spad_write(qp->ndev, spad + 1, qp->msi_desc.data); in ntb_transport_setup_qp_msi()
760 dev_dbg(&qp->ndev->pdev->dev, "QP%d MSI %d addr=%x data=%x\n", in ntb_transport_setup_qp_msi()
761 qp_num, qp->msi_irq, qp->msi_desc.addr_offset, in ntb_transport_setup_qp_msi()
762 qp->msi_desc.data); in ntb_transport_setup_qp_msi()
767 devm_free_irq(&nt->ndev->dev, qp->msi_irq, qp); in ntb_transport_setup_qp_msi()
774 dev_dbg(&nt->ndev->pdev->dev, "Peer MSI descriptors changed"); in ntb_transport_msi_peer_desc_changed()
776 for (i = 0; i < nt->qp_count; i++) in ntb_transport_msi_peer_desc_changed()
785 dev_dbg(&nt->ndev->pdev->dev, "MSI descriptors changed"); in ntb_transport_msi_desc_changed()
787 for (i = 0; i < nt->qp_count; i++) in ntb_transport_msi_desc_changed()
790 ntb_peer_db_set(nt->ndev, nt->msi_db_mask); in ntb_transport_msi_desc_changed()
795 struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; in ntb_free_mw()
796 struct pci_dev *pdev = nt->ndev->pdev; in ntb_free_mw()
798 if (!mw->virt_addr) in ntb_free_mw()
801 ntb_mw_clear_trans(nt->ndev, PIDX, num_mw); in ntb_free_mw()
802 dma_free_coherent(&pdev->dev, mw->alloc_size, in ntb_free_mw()
803 mw->alloc_addr, mw->dma_addr); in ntb_free_mw()
804 mw->xlat_size = 0; in ntb_free_mw()
805 mw->buff_size = 0; in ntb_free_mw()
806 mw->alloc_size = 0; in ntb_free_mw()
807 mw->alloc_addr = NULL; in ntb_free_mw()
808 mw->virt_addr = NULL; in ntb_free_mw()
819 * The buffer here is allocated against the NTB device. The reason to in ntb_alloc_mw_buffer()
820 * use dma_alloc_*() call is to allocate a large IOVA contiguous buffer in ntb_alloc_mw_buffer()
822 * processing, the data is being copied out of the receive buffer to in ntb_alloc_mw_buffer()
824 * is called on the kvaddr of the receive buffer (from dma_alloc_*()) in ntb_alloc_mw_buffer()
830 alloc_addr = dma_alloc_attrs(ntb_dev, mw->alloc_size, in ntb_alloc_mw_buffer()
835 mw->alloc_size); in ntb_alloc_mw_buffer()
836 return -ENOMEM; in ntb_alloc_mw_buffer()
847 if (mw->alloc_size > mw->buff_size) { in ntb_alloc_mw_buffer()
851 rc = -ENOMEM; in ntb_alloc_mw_buffer()
856 mw->alloc_addr = alloc_addr; in ntb_alloc_mw_buffer()
857 mw->virt_addr = virt_addr; in ntb_alloc_mw_buffer()
858 mw->dma_addr = dma_addr; in ntb_alloc_mw_buffer()
863 dma_free_coherent(ntb_dev, mw->alloc_size, alloc_addr, dma_addr); in ntb_alloc_mw_buffer()
871 struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; in ntb_set_mw()
872 struct pci_dev *pdev = nt->ndev->pdev; in ntb_set_mw()
879 return -EINVAL; in ntb_set_mw()
881 rc = ntb_mw_get_align(nt->ndev, PIDX, num_mw, &xlat_align, in ntb_set_mw()
889 /* No need to re-setup */ in ntb_set_mw()
890 if (mw->xlat_size == xlat_size) in ntb_set_mw()
893 if (mw->buff_size) in ntb_set_mw()
897 mw->xlat_size = xlat_size; in ntb_set_mw()
898 mw->buff_size = buff_size; in ntb_set_mw()
899 mw->alloc_size = buff_size; in ntb_set_mw()
901 rc = ntb_alloc_mw_buffer(mw, &pdev->dev, xlat_align); in ntb_set_mw()
903 mw->alloc_size *= 2; in ntb_set_mw()
904 rc = ntb_alloc_mw_buffer(mw, &pdev->dev, xlat_align); in ntb_set_mw()
906 dev_err(&pdev->dev, in ntb_set_mw()
908 mw->xlat_size = 0; in ntb_set_mw()
909 mw->buff_size = 0; in ntb_set_mw()
910 mw->alloc_size = 0; in ntb_set_mw()
915 /* Notify HW of the memory location of the receive buffer */ in ntb_set_mw()
916 rc = ntb_mw_set_trans(nt->ndev, PIDX, num_mw, mw->dma_addr, in ntb_set_mw()
917 mw->xlat_size); in ntb_set_mw()
919 dev_err(&pdev->dev, "Unable to set mw%d translation", num_mw); in ntb_set_mw()
921 return -EIO; in ntb_set_mw()
929 qp->link_is_up = false; in ntb_qp_link_context_reset()
930 qp->active = false; in ntb_qp_link_context_reset()
932 qp->tx_index = 0; in ntb_qp_link_context_reset()
933 qp->rx_index = 0; in ntb_qp_link_context_reset()
934 qp->rx_bytes = 0; in ntb_qp_link_context_reset()
935 qp->rx_pkts = 0; in ntb_qp_link_context_reset()
936 qp->rx_ring_empty = 0; in ntb_qp_link_context_reset()
937 qp->rx_err_no_buf = 0; in ntb_qp_link_context_reset()
938 qp->rx_err_oflow = 0; in ntb_qp_link_context_reset()
939 qp->rx_err_ver = 0; in ntb_qp_link_context_reset()
940 qp->rx_memcpy = 0; in ntb_qp_link_context_reset()
941 qp->rx_async = 0; in ntb_qp_link_context_reset()
942 qp->tx_bytes = 0; in ntb_qp_link_context_reset()
943 qp->tx_pkts = 0; in ntb_qp_link_context_reset()
944 qp->tx_ring_full = 0; in ntb_qp_link_context_reset()
945 qp->tx_err_no_buf = 0; in ntb_qp_link_context_reset()
946 qp->tx_memcpy = 0; in ntb_qp_link_context_reset()
947 qp->tx_async = 0; in ntb_qp_link_context_reset()
953 if (qp->remote_rx_info) in ntb_qp_link_down_reset()
954 qp->remote_rx_info->entry = qp->rx_max_entry - 1; in ntb_qp_link_down_reset()
959 struct ntb_transport_ctx *nt = qp->transport; in ntb_qp_link_cleanup()
960 struct pci_dev *pdev = nt->ndev->pdev; in ntb_qp_link_cleanup()
962 dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num); in ntb_qp_link_cleanup()
964 cancel_delayed_work_sync(&qp->link_work); in ntb_qp_link_cleanup()
967 if (qp->event_handler) in ntb_qp_link_cleanup()
968 qp->event_handler(qp->cb_data, qp->link_is_up); in ntb_qp_link_cleanup()
976 struct ntb_transport_ctx *nt = qp->transport; in ntb_qp_link_cleanup_work()
980 if (nt->link_is_up) in ntb_qp_link_cleanup_work()
981 schedule_delayed_work(&qp->link_work, in ntb_qp_link_cleanup_work()
987 schedule_work(&qp->link_cleanup); in ntb_qp_link_down()
996 qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free; in ntb_transport_link_cleanup()
999 for (i = 0; i < nt->qp_count; i++) in ntb_transport_link_cleanup()
1001 qp = &nt->qp_vec[i]; in ntb_transport_link_cleanup()
1003 cancel_work_sync(&qp->link_cleanup); in ntb_transport_link_cleanup()
1004 cancel_delayed_work_sync(&qp->link_work); in ntb_transport_link_cleanup()
1007 if (!nt->link_is_up) in ntb_transport_link_cleanup()
1008 cancel_delayed_work_sync(&nt->link_work); in ntb_transport_link_cleanup()
1010 for (i = 0; i < nt->mw_count; i++) in ntb_transport_link_cleanup()
1017 count = ntb_spad_count(nt->ndev); in ntb_transport_link_cleanup()
1019 ntb_spad_write(nt->ndev, i, 0); in ntb_transport_link_cleanup()
1034 if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1) in ntb_transport_event_callback()
1035 schedule_delayed_work(&nt->link_work, 0); in ntb_transport_event_callback()
1037 schedule_work(&nt->link_cleanup); in ntb_transport_event_callback()
1044 struct ntb_dev *ndev = nt->ndev; in ntb_transport_link_work()
1045 struct pci_dev *pdev = ndev->pdev; in ntb_transport_link_work()
1052 if (nt->use_msi) { in ntb_transport_link_work()
1055 dev_warn(&pdev->dev, in ntb_transport_link_work()
1058 nt->use_msi = false; in ntb_transport_link_work()
1062 for (i = 0; i < nt->qp_count; i++) in ntb_transport_link_work()
1065 for (i = 0; i < nt->mw_count; i++) { in ntb_transport_link_work()
1066 size = nt->mw_vec[i].phys_size; in ntb_transport_link_work()
1078 ntb_peer_spad_write(ndev, PIDX, NUM_MWS, nt->mw_count); in ntb_transport_link_work()
1080 ntb_peer_spad_write(ndev, PIDX, NUM_QPS, nt->qp_count); in ntb_transport_link_work()
1086 dev_dbg(&pdev->dev, "Remote version = %d\n", val); in ntb_transport_link_work()
1091 dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val); in ntb_transport_link_work()
1092 if (val != nt->qp_count) in ntb_transport_link_work()
1096 dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val); in ntb_transport_link_work()
1097 if (val != nt->mw_count) in ntb_transport_link_work()
1100 for (i = 0; i < nt->mw_count; i++) { in ntb_transport_link_work()
1109 dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64); in ntb_transport_link_work()
1116 nt->link_is_up = true; in ntb_transport_link_work()
1118 for (i = 0; i < nt->qp_count; i++) { in ntb_transport_link_work()
1119 struct ntb_transport_qp *qp = &nt->qp_vec[i]; in ntb_transport_link_work()
1124 if (qp->client_ready) in ntb_transport_link_work()
1125 schedule_delayed_work(&qp->link_work, 0); in ntb_transport_link_work()
1131 for (i = 0; i < nt->mw_count; i++) in ntb_transport_link_work()
1140 schedule_delayed_work(&nt->link_work, in ntb_transport_link_work()
1149 struct pci_dev *pdev = qp->ndev->pdev; in ntb_qp_link_work()
1150 struct ntb_transport_ctx *nt = qp->transport; in ntb_qp_link_work()
1153 WARN_ON(!nt->link_is_up); in ntb_qp_link_work()
1155 val = ntb_spad_read(nt->ndev, QP_LINKS); in ntb_qp_link_work()
1157 ntb_peer_spad_write(nt->ndev, PIDX, QP_LINKS, val | BIT(qp->qp_num)); in ntb_qp_link_work()
1160 dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val); in ntb_qp_link_work()
1163 if (val & BIT(qp->qp_num)) { in ntb_qp_link_work()
1164 dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num); in ntb_qp_link_work()
1165 qp->link_is_up = true; in ntb_qp_link_work()
1166 qp->active = true; in ntb_qp_link_work()
1168 if (qp->event_handler) in ntb_qp_link_work()
1169 qp->event_handler(qp->cb_data, qp->link_is_up); in ntb_qp_link_work()
1171 if (qp->active) in ntb_qp_link_work()
1172 tasklet_schedule(&qp->rxc_db_work); in ntb_qp_link_work()
1173 } else if (nt->link_is_up) in ntb_qp_link_work()
1174 schedule_delayed_work(&qp->link_work, in ntb_qp_link_work()
1188 mw_count = nt->mw_count; in ntb_transport_init_queue()
1189 qp_count = nt->qp_count; in ntb_transport_init_queue()
1193 qp = &nt->qp_vec[qp_num]; in ntb_transport_init_queue()
1194 qp->qp_num = qp_num; in ntb_transport_init_queue()
1195 qp->transport = nt; in ntb_transport_init_queue()
1196 qp->ndev = nt->ndev; in ntb_transport_init_queue()
1197 qp->client_ready = false; in ntb_transport_init_queue()
1198 qp->event_handler = NULL; in ntb_transport_init_queue()
1206 mw_base = nt->mw_vec[mw_num].phys_addr; in ntb_transport_init_queue()
1207 mw_size = nt->mw_vec[mw_num].phys_size; in ntb_transport_init_queue()
1215 qp->tx_mw_size = tx_size; in ntb_transport_init_queue()
1216 qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset; in ntb_transport_init_queue()
1217 if (!qp->tx_mw) in ntb_transport_init_queue()
1218 return -EINVAL; in ntb_transport_init_queue()
1220 qp->tx_mw_phys = mw_base + qp_offset; in ntb_transport_init_queue()
1221 if (!qp->tx_mw_phys) in ntb_transport_init_queue()
1222 return -EINVAL; in ntb_transport_init_queue()
1224 tx_size -= sizeof(struct ntb_rx_info); in ntb_transport_init_queue()
1225 qp->rx_info = qp->tx_mw + tx_size; in ntb_transport_init_queue()
1228 qp->tx_max_frame = min(transport_mtu, tx_size / 2); in ntb_transport_init_queue()
1229 qp->tx_max_entry = tx_size / qp->tx_max_frame; in ntb_transport_init_queue()
1231 if (nt->debugfs_node_dir) { in ntb_transport_init_queue()
1235 qp->debugfs_dir = debugfs_create_dir(debugfs_name, in ntb_transport_init_queue()
1236 nt->debugfs_node_dir); in ntb_transport_init_queue()
1238 qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR, in ntb_transport_init_queue()
1239 qp->debugfs_dir, qp, in ntb_transport_init_queue()
1242 qp->debugfs_dir = NULL; in ntb_transport_init_queue()
1243 qp->debugfs_stats = NULL; in ntb_transport_init_queue()
1246 INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work); in ntb_transport_init_queue()
1247 INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work); in ntb_transport_init_queue()
1249 spin_lock_init(&qp->ntb_rx_q_lock); in ntb_transport_init_queue()
1250 spin_lock_init(&qp->ntb_tx_free_q_lock); in ntb_transport_init_queue()
1252 INIT_LIST_HEAD(&qp->rx_post_q); in ntb_transport_init_queue()
1253 INIT_LIST_HEAD(&qp->rx_pend_q); in ntb_transport_init_queue()
1254 INIT_LIST_HEAD(&qp->rx_free_q); in ntb_transport_init_queue()
1255 INIT_LIST_HEAD(&qp->tx_free_q); in ntb_transport_init_queue()
1257 tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db, in ntb_transport_init_queue()
1274 if (!ndev->ops->mw_set_trans) { in ntb_transport_probe()
1275 dev_err(&ndev->dev, "Inbound MW based NTB API is required\n"); in ntb_transport_probe()
1276 return -EINVAL; in ntb_transport_probe()
1280 dev_dbg(&ndev->dev, in ntb_transport_probe()
1283 dev_dbg(&ndev->dev, in ntb_transport_probe()
1287 dev_warn(&ndev->dev, "Multi-port NTB devices unsupported\n"); in ntb_transport_probe()
1289 node = dev_to_node(&ndev->dev); in ntb_transport_probe()
1293 return -ENOMEM; in ntb_transport_probe()
1295 nt->ndev = ndev; in ntb_transport_probe()
1304 mw_count -= 1; in ntb_transport_probe()
1305 nt->use_msi = true; in ntb_transport_probe()
1314 nt->mw_count = 0; in ntb_transport_probe()
1315 rc = -EINVAL; in ntb_transport_probe()
1319 max_mw_count_for_spads = (spad_count - MW0_SZ_HIGH) / 2; in ntb_transport_probe()
1320 nt->mw_count = min(mw_count, max_mw_count_for_spads); in ntb_transport_probe()
1322 nt->msi_spad_offset = nt->mw_count * 2 + MW0_SZ_HIGH; in ntb_transport_probe()
1324 nt->mw_vec = kcalloc_node(mw_count, sizeof(*nt->mw_vec), in ntb_transport_probe()
1326 if (!nt->mw_vec) { in ntb_transport_probe()
1327 rc = -ENOMEM; in ntb_transport_probe()
1332 mw = &nt->mw_vec[i]; in ntb_transport_probe()
1334 rc = ntb_peer_mw_get_addr(ndev, i, &mw->phys_addr, in ntb_transport_probe()
1335 &mw->phys_size); in ntb_transport_probe()
1339 mw->vbase = ioremap_wc(mw->phys_addr, mw->phys_size); in ntb_transport_probe()
1340 if (!mw->vbase) { in ntb_transport_probe()
1341 rc = -ENOMEM; in ntb_transport_probe()
1345 mw->buff_size = 0; in ntb_transport_probe()
1346 mw->xlat_size = 0; in ntb_transport_probe()
1347 mw->virt_addr = NULL; in ntb_transport_probe()
1348 mw->dma_addr = 0; in ntb_transport_probe()
1354 if (nt->use_msi) { in ntb_transport_probe()
1355 qp_count -= 1; in ntb_transport_probe()
1356 nt->msi_db_mask = BIT_ULL(qp_count); in ntb_transport_probe()
1357 ntb_db_clear_mask(ndev, nt->msi_db_mask); in ntb_transport_probe()
1362 else if (nt->mw_count < qp_count) in ntb_transport_probe()
1363 qp_count = nt->mw_count; in ntb_transport_probe()
1365 qp_bitmap &= BIT_ULL(qp_count) - 1; in ntb_transport_probe()
1367 nt->qp_count = qp_count; in ntb_transport_probe()
1368 nt->qp_bitmap = qp_bitmap; in ntb_transport_probe()
1369 nt->qp_bitmap_free = qp_bitmap; in ntb_transport_probe()
1371 nt->qp_vec = kcalloc_node(qp_count, sizeof(*nt->qp_vec), in ntb_transport_probe()
1373 if (!nt->qp_vec) { in ntb_transport_probe()
1374 rc = -ENOMEM; in ntb_transport_probe()
1379 nt->debugfs_node_dir = in ntb_transport_probe()
1380 debugfs_create_dir(pci_name(ndev->pdev), in ntb_transport_probe()
1390 INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work); in ntb_transport_probe()
1391 INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work); in ntb_transport_probe()
1397 INIT_LIST_HEAD(&nt->client_devs); in ntb_transport_probe()
1402 nt->link_is_up = false; in ntb_transport_probe()
1411 kfree(nt->qp_vec); in ntb_transport_probe()
1413 while (i--) { in ntb_transport_probe()
1414 mw = &nt->mw_vec[i]; in ntb_transport_probe()
1415 iounmap(mw->vbase); in ntb_transport_probe()
1417 kfree(nt->mw_vec); in ntb_transport_probe()
1425 struct ntb_transport_ctx *nt = ndev->ctx; in ntb_transport_free()
1431 cancel_work_sync(&nt->link_cleanup); in ntb_transport_free()
1432 cancel_delayed_work_sync(&nt->link_work); in ntb_transport_free()
1434 qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free; in ntb_transport_free()
1437 for (i = 0; i < nt->qp_count; i++) { in ntb_transport_free()
1438 qp = &nt->qp_vec[i]; in ntb_transport_free()
1441 debugfs_remove_recursive(qp->debugfs_dir); in ntb_transport_free()
1449 for (i = nt->mw_count; i--; ) { in ntb_transport_free()
1451 iounmap(nt->mw_vec[i].vbase); in ntb_transport_free()
1454 kfree(nt->qp_vec); in ntb_transport_free()
1455 kfree(nt->mw_vec); in ntb_transport_free()
1466 spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags); in ntb_complete_rxc()
1468 while (!list_empty(&qp->rx_post_q)) { in ntb_complete_rxc()
1469 entry = list_first_entry(&qp->rx_post_q, in ntb_complete_rxc()
1471 if (!(entry->flags & DESC_DONE_FLAG)) in ntb_complete_rxc()
1474 entry->rx_hdr->flags = 0; in ntb_complete_rxc()
1475 iowrite32(entry->rx_index, &qp->rx_info->entry); in ntb_complete_rxc()
1477 cb_data = entry->cb_data; in ntb_complete_rxc()
1478 len = entry->len; in ntb_complete_rxc()
1480 list_move_tail(&entry->entry, &qp->rx_free_q); in ntb_complete_rxc()
1482 spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags); in ntb_complete_rxc()
1484 if (qp->rx_handler && qp->client_ready) in ntb_complete_rxc()
1485 qp->rx_handler(qp, qp->cb_data, cb_data, len); in ntb_complete_rxc()
1487 spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags); in ntb_complete_rxc()
1490 spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags); in ntb_complete_rxc()
1500 enum dmaengine_tx_result dma_err = res->result; in ntb_rx_copy_callback()
1505 entry->errors++; in ntb_rx_copy_callback()
1509 struct ntb_transport_qp *qp = entry->qp; in ntb_rx_copy_callback()
1510 void *offset = qp->rx_buff + qp->rx_max_frame * in ntb_rx_copy_callback() local
1511 qp->rx_index; in ntb_rx_copy_callback()
1513 ntb_memcpy_rx(entry, offset); in ntb_rx_copy_callback()
1514 qp->rx_memcpy++; in ntb_rx_copy_callback()
1524 entry->flags |= DESC_DONE_FLAG; in ntb_rx_copy_callback()
1526 ntb_complete_rxc(entry->qp); in ntb_rx_copy_callback()
1529 static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset) in ntb_memcpy_rx() argument
1531 void *buf = entry->buf; in ntb_memcpy_rx()
1532 size_t len = entry->len; in ntb_memcpy_rx()
1534 memcpy(buf, offset, len); in ntb_memcpy_rx()
1542 static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset) in ntb_async_rx_submit() argument
1545 struct ntb_transport_qp *qp = entry->qp; in ntb_async_rx_submit()
1546 struct dma_chan *chan = qp->rx_dma_chan; in ntb_async_rx_submit()
1551 void *buf = entry->buf; in ntb_async_rx_submit()
1553 len = entry->len; in ntb_async_rx_submit()
1554 device = chan->device; in ntb_async_rx_submit()
1555 pay_off = (size_t)offset & ~PAGE_MASK; in ntb_async_rx_submit()
1561 unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT); in ntb_async_rx_submit()
1565 unmap->len = len; in ntb_async_rx_submit()
1566 unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset), in ntb_async_rx_submit()
1568 if (dma_mapping_error(device->dev, unmap->addr[0])) in ntb_async_rx_submit()
1571 unmap->to_cnt = 1; in ntb_async_rx_submit()
1573 unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf), in ntb_async_rx_submit()
1575 if (dma_mapping_error(device->dev, unmap->addr[1])) in ntb_async_rx_submit()
1578 unmap->from_cnt = 1; in ntb_async_rx_submit()
1580 txd = device->device_prep_dma_memcpy(chan, unmap->addr[1], in ntb_async_rx_submit()
1581 unmap->addr[0], len, in ntb_async_rx_submit()
1586 txd->callback_result = ntb_rx_copy_callback; in ntb_async_rx_submit()
1587 txd->callback_param = entry; in ntb_async_rx_submit()
1596 qp->last_cookie = cookie; in ntb_async_rx_submit()
1598 qp->rx_async++; in ntb_async_rx_submit()
1607 return -ENXIO; in ntb_async_rx_submit()
1610 static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset) in ntb_async_rx() argument
1612 struct ntb_transport_qp *qp = entry->qp; in ntb_async_rx()
1613 struct dma_chan *chan = qp->rx_dma_chan; in ntb_async_rx()
1619 if (entry->len < copy_bytes) in ntb_async_rx()
1622 res = ntb_async_rx_submit(entry, offset); in ntb_async_rx()
1626 if (!entry->retries) in ntb_async_rx()
1627 qp->rx_async++; in ntb_async_rx()
1632 ntb_memcpy_rx(entry, offset); in ntb_async_rx()
1633 qp->rx_memcpy++; in ntb_async_rx()
1640 void *offset; in ntb_process_rxc() local
1642 offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index; in ntb_process_rxc()
1643 hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header); in ntb_process_rxc()
1645 dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n", in ntb_process_rxc()
1646 qp->qp_num, hdr->ver, hdr->len, hdr->flags); in ntb_process_rxc()
1648 if (!(hdr->flags & DESC_DONE_FLAG)) { in ntb_process_rxc()
1649 dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n"); in ntb_process_rxc()
1650 qp->rx_ring_empty++; in ntb_process_rxc()
1651 return -EAGAIN; in ntb_process_rxc()
1654 if (hdr->flags & LINK_DOWN_FLAG) { in ntb_process_rxc()
1655 dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n"); in ntb_process_rxc()
1657 hdr->flags = 0; in ntb_process_rxc()
1658 return -EAGAIN; in ntb_process_rxc()
1661 if (hdr->ver != (u32)qp->rx_pkts) { in ntb_process_rxc()
1662 dev_dbg(&qp->ndev->pdev->dev, in ntb_process_rxc()
1663 "version mismatch, expected %llu - got %u\n", in ntb_process_rxc()
1664 qp->rx_pkts, hdr->ver); in ntb_process_rxc()
1665 qp->rx_err_ver++; in ntb_process_rxc()
1666 return -EIO; in ntb_process_rxc()
1669 entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q); in ntb_process_rxc()
1671 dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n"); in ntb_process_rxc()
1672 qp->rx_err_no_buf++; in ntb_process_rxc()
1673 return -EAGAIN; in ntb_process_rxc()
1676 entry->rx_hdr = hdr; in ntb_process_rxc()
1677 entry->rx_index = qp->rx_index; in ntb_process_rxc()
1679 if (hdr->len > entry->len) { in ntb_process_rxc()
1680 dev_dbg(&qp->ndev->pdev->dev, in ntb_process_rxc()
1681 "receive buffer overflow! Wanted %d got %d\n", in ntb_process_rxc()
1682 hdr->len, entry->len); in ntb_process_rxc()
1683 qp->rx_err_oflow++; in ntb_process_rxc()
1685 entry->len = -EIO; in ntb_process_rxc()
1686 entry->flags |= DESC_DONE_FLAG; in ntb_process_rxc()
1690 dev_dbg(&qp->ndev->pdev->dev, in ntb_process_rxc()
1692 qp->rx_index, hdr->ver, hdr->len, entry->len); in ntb_process_rxc()
1694 qp->rx_bytes += hdr->len; in ntb_process_rxc()
1695 qp->rx_pkts++; in ntb_process_rxc()
1697 entry->len = hdr->len; in ntb_process_rxc()
1699 ntb_async_rx(entry, offset); in ntb_process_rxc()
1702 qp->rx_index++; in ntb_process_rxc()
1703 qp->rx_index %= qp->rx_max_entry; in ntb_process_rxc()
1713 dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n", in ntb_transport_rxc_db()
1714 __func__, qp->qp_num); in ntb_transport_rxc_db()
1719 for (i = 0; i < qp->rx_max_entry; i++) { in ntb_transport_rxc_db()
1725 if (i && qp->rx_dma_chan) in ntb_transport_rxc_db()
1726 dma_async_issue_pending(qp->rx_dma_chan); in ntb_transport_rxc_db()
1728 if (i == qp->rx_max_entry) { in ntb_transport_rxc_db()
1730 if (qp->active) in ntb_transport_rxc_db()
1731 tasklet_schedule(&qp->rxc_db_work); in ntb_transport_rxc_db()
1732 } else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) { in ntb_transport_rxc_db()
1734 ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num)); in ntb_transport_rxc_db()
1736 ntb_db_read(qp->ndev); in ntb_transport_rxc_db()
1742 if (qp->active) in ntb_transport_rxc_db()
1743 tasklet_schedule(&qp->rxc_db_work); in ntb_transport_rxc_db()
1751 struct ntb_transport_qp *qp = entry->qp; in ntb_tx_copy_callback()
1752 struct ntb_payload_header __iomem *hdr = entry->tx_hdr; in ntb_tx_copy_callback()
1756 enum dmaengine_tx_result dma_err = res->result; in ntb_tx_copy_callback()
1761 entry->errors++; in ntb_tx_copy_callback()
1765 void __iomem *offset = in ntb_tx_copy_callback() local
1766 qp->tx_mw + qp->tx_max_frame * in ntb_tx_copy_callback()
1767 entry->tx_index; in ntb_tx_copy_callback()
1770 ntb_memcpy_tx(entry, offset); in ntb_tx_copy_callback()
1771 qp->tx_memcpy++; in ntb_tx_copy_callback()
1781 iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags); in ntb_tx_copy_callback()
1783 if (qp->use_msi) in ntb_tx_copy_callback()
1784 ntb_msi_peer_trigger(qp->ndev, PIDX, &qp->peer_msi_desc); in ntb_tx_copy_callback()
1786 ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num)); in ntb_tx_copy_callback()
1792 if (entry->len > 0) { in ntb_tx_copy_callback()
1793 qp->tx_bytes += entry->len; in ntb_tx_copy_callback()
1795 if (qp->tx_handler) in ntb_tx_copy_callback()
1796 qp->tx_handler(qp, qp->cb_data, entry->cb_data, in ntb_tx_copy_callback()
1797 entry->len); in ntb_tx_copy_callback()
1800 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q); in ntb_tx_copy_callback()
1803 static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset) in ntb_memcpy_tx() argument
1807 * Using non-temporal mov to improve performance on non-cached in ntb_memcpy_tx()
1810 __copy_from_user_inatomic_nocache(offset, entry->buf, entry->len); in ntb_memcpy_tx()
1812 memcpy_toio(offset, entry->buf, entry->len); in ntb_memcpy_tx()
1825 struct dma_chan *chan = qp->tx_dma_chan; in ntb_async_tx_submit()
1827 size_t len = entry->len; in ntb_async_tx_submit()
1828 void *buf = entry->buf; in ntb_async_tx_submit()
1834 device = chan->device; in ntb_async_tx_submit()
1835 dest = qp->tx_mw_dma_addr + qp->tx_max_frame * entry->tx_index; in ntb_async_tx_submit()
1842 unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT); in ntb_async_tx_submit()
1846 unmap->len = len; in ntb_async_tx_submit()
1847 unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf), in ntb_async_tx_submit()
1849 if (dma_mapping_error(device->dev, unmap->addr[0])) in ntb_async_tx_submit()
1852 unmap->to_cnt = 1; in ntb_async_tx_submit()
1854 txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len, in ntb_async_tx_submit()
1859 txd->callback_result = ntb_tx_copy_callback; in ntb_async_tx_submit()
1860 txd->callback_param = entry; in ntb_async_tx_submit()
1877 return -ENXIO; in ntb_async_tx_submit()
1884 struct dma_chan *chan = qp->tx_dma_chan; in ntb_async_tx()
1885 void __iomem *offset; in ntb_async_tx() local
1888 entry->tx_index = qp->tx_index; in ntb_async_tx()
1889 offset = qp->tx_mw + qp->tx_max_frame * entry->tx_index; in ntb_async_tx()
1890 hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header); in ntb_async_tx()
1891 entry->tx_hdr = hdr; in ntb_async_tx()
1893 iowrite32(entry->len, &hdr->len); in ntb_async_tx()
1894 iowrite32((u32)qp->tx_pkts, &hdr->ver); in ntb_async_tx()
1899 if (entry->len < copy_bytes) in ntb_async_tx()
1906 if (!entry->retries) in ntb_async_tx()
1907 qp->tx_async++; in ntb_async_tx()
1912 ntb_memcpy_tx(entry, offset); in ntb_async_tx()
1913 qp->tx_memcpy++; in ntb_async_tx()
1920 qp->tx_ring_full++; in ntb_process_tx()
1921 return -EAGAIN; in ntb_process_tx()
1924 if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) { in ntb_process_tx()
1925 if (qp->tx_handler) in ntb_process_tx()
1926 qp->tx_handler(qp, qp->cb_data, NULL, -EIO); in ntb_process_tx()
1928 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, in ntb_process_tx()
1929 &qp->tx_free_q); in ntb_process_tx()
1935 qp->tx_index++; in ntb_process_tx()
1936 qp->tx_index %= qp->tx_max_entry; in ntb_process_tx()
1938 qp->tx_pkts++; in ntb_process_tx()
1945 struct pci_dev *pdev = qp->ndev->pdev; in ntb_send_link_down()
1949 if (!qp->link_is_up) in ntb_send_link_down()
1952 dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num); in ntb_send_link_down()
1955 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); in ntb_send_link_down()
1964 entry->cb_data = NULL; in ntb_send_link_down()
1965 entry->buf = NULL; in ntb_send_link_down()
1966 entry->len = 0; in ntb_send_link_down()
1967 entry->flags = LINK_DOWN_FLAG; in ntb_send_link_down()
1971 dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n", in ntb_send_link_down()
1972 qp->qp_num); in ntb_send_link_down()
1979 return dev_to_node(&chan->dev->device) == (int)(unsigned long)node; in ntb_dma_filter_fn()
1983 * ntb_transport_create_queue - Create a new NTB transport layer queue
1988 * Create a new NTB transport layer queue and provide the queue with a callback
2011 ndev = dev_ntb(client_dev->parent); in ntb_transport_create_queue()
2012 pdev = ndev->pdev; in ntb_transport_create_queue()
2013 nt = ndev->ctx; in ntb_transport_create_queue()
2015 node = dev_to_node(&ndev->dev); in ntb_transport_create_queue()
2017 free_queue = ffs(nt->qp_bitmap_free); in ntb_transport_create_queue()
2022 free_queue--; in ntb_transport_create_queue()
2024 qp = &nt->qp_vec[free_queue]; in ntb_transport_create_queue()
2025 qp_bit = BIT_ULL(qp->qp_num); in ntb_transport_create_queue()
2027 nt->qp_bitmap_free &= ~qp_bit; in ntb_transport_create_queue()
2029 qp->cb_data = data; in ntb_transport_create_queue()
2030 qp->rx_handler = handlers->rx_handler; in ntb_transport_create_queue()
2031 qp->tx_handler = handlers->tx_handler; in ntb_transport_create_queue()
2032 qp->event_handler = handlers->event_handler; in ntb_transport_create_queue()
2038 qp->tx_dma_chan = in ntb_transport_create_queue()
2041 if (!qp->tx_dma_chan) in ntb_transport_create_queue()
2042 dev_info(&pdev->dev, "Unable to allocate TX DMA channel\n"); in ntb_transport_create_queue()
2044 qp->rx_dma_chan = in ntb_transport_create_queue()
2047 if (!qp->rx_dma_chan) in ntb_transport_create_queue()
2048 dev_info(&pdev->dev, "Unable to allocate RX DMA channel\n"); in ntb_transport_create_queue()
2050 qp->tx_dma_chan = NULL; in ntb_transport_create_queue()
2051 qp->rx_dma_chan = NULL; in ntb_transport_create_queue()
2054 qp->tx_mw_dma_addr = 0; in ntb_transport_create_queue()
2055 if (qp->tx_dma_chan) { in ntb_transport_create_queue()
2056 qp->tx_mw_dma_addr = in ntb_transport_create_queue()
2057 dma_map_resource(qp->tx_dma_chan->device->dev, in ntb_transport_create_queue()
2058 qp->tx_mw_phys, qp->tx_mw_size, in ntb_transport_create_queue()
2060 if (dma_mapping_error(qp->tx_dma_chan->device->dev, in ntb_transport_create_queue()
2061 qp->tx_mw_dma_addr)) { in ntb_transport_create_queue()
2062 qp->tx_mw_dma_addr = 0; in ntb_transport_create_queue()
2067 dev_dbg(&pdev->dev, "Using %s memcpy for TX\n", in ntb_transport_create_queue()
2068 qp->tx_dma_chan ? "DMA" : "CPU"); in ntb_transport_create_queue()
2070 dev_dbg(&pdev->dev, "Using %s memcpy for RX\n", in ntb_transport_create_queue()
2071 qp->rx_dma_chan ? "DMA" : "CPU"); in ntb_transport_create_queue()
2078 entry->qp = qp; in ntb_transport_create_queue()
2079 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, in ntb_transport_create_queue()
2080 &qp->rx_free_q); in ntb_transport_create_queue()
2082 qp->rx_alloc_entry = NTB_QP_DEF_NUM_ENTRIES; in ntb_transport_create_queue()
2084 for (i = 0; i < qp->tx_max_entry; i++) { in ntb_transport_create_queue()
2089 entry->qp = qp; in ntb_transport_create_queue()
2090 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, in ntb_transport_create_queue()
2091 &qp->tx_free_q); in ntb_transport_create_queue()
2094 ntb_db_clear(qp->ndev, qp_bit); in ntb_transport_create_queue()
2095 ntb_db_clear_mask(qp->ndev, qp_bit); in ntb_transport_create_queue()
2097 dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num); in ntb_transport_create_queue()
2102 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) in ntb_transport_create_queue()
2105 qp->rx_alloc_entry = 0; in ntb_transport_create_queue()
2106 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) in ntb_transport_create_queue()
2108 if (qp->tx_mw_dma_addr) in ntb_transport_create_queue()
2109 dma_unmap_resource(qp->tx_dma_chan->device->dev, in ntb_transport_create_queue()
2110 qp->tx_mw_dma_addr, qp->tx_mw_size, in ntb_transport_create_queue()
2112 if (qp->tx_dma_chan) in ntb_transport_create_queue()
2113 dma_release_channel(qp->tx_dma_chan); in ntb_transport_create_queue()
2114 if (qp->rx_dma_chan) in ntb_transport_create_queue()
2115 dma_release_channel(qp->rx_dma_chan); in ntb_transport_create_queue()
2116 nt->qp_bitmap_free |= qp_bit; in ntb_transport_create_queue()
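
A client creates its queue against the client device it was probed with, supplying rx/tx/event handlers through struct ntb_queue_handlers. A condensed sketch of such a probe(), continuing the hypothetical client from the registration example above (handler bodies are elided; my_ctx is an assumed per-client context):

	struct my_ctx {
		struct ntb_transport_qp *qp;
	};

	static const struct ntb_queue_handlers my_handlers = {
		.rx_handler	= my_rx_handler,	/* receive buffer completed  */
		.tx_handler	= my_tx_handler,	/* transmit buffer completed */
		.event_handler	= my_event_handler,	/* QP link went up or down   */
	};

	static int my_ntb_client_probe(struct device *client_dev)
	{
		struct my_ctx *ctx;

		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;

		ctx->qp = ntb_transport_create_queue(ctx, client_dev, &my_handlers);
		if (!ctx->qp) {
			kfree(ctx);
			return -EIO;
		}

		dev_set_drvdata(client_dev, ctx);
		return 0;
	}
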
2123 * ntb_transport_free_queue - Frees NTB transport queue
2137 pdev = qp->ndev->pdev; in ntb_transport_free_queue()
2139 qp->active = false; in ntb_transport_free_queue()
2141 if (qp->tx_dma_chan) { in ntb_transport_free_queue()
2142 struct dma_chan *chan = qp->tx_dma_chan; in ntb_transport_free_queue()
2146 qp->tx_dma_chan = NULL; in ntb_transport_free_queue()
2151 dma_sync_wait(chan, qp->last_cookie); in ntb_transport_free_queue()
2154 dma_unmap_resource(chan->device->dev, in ntb_transport_free_queue()
2155 qp->tx_mw_dma_addr, qp->tx_mw_size, in ntb_transport_free_queue()
2161 if (qp->rx_dma_chan) { in ntb_transport_free_queue()
2162 struct dma_chan *chan = qp->rx_dma_chan; in ntb_transport_free_queue()
2166 qp->rx_dma_chan = NULL; in ntb_transport_free_queue()
2171 dma_sync_wait(chan, qp->last_cookie); in ntb_transport_free_queue()
2176 qp_bit = BIT_ULL(qp->qp_num); in ntb_transport_free_queue()
2178 ntb_db_set_mask(qp->ndev, qp_bit); in ntb_transport_free_queue()
2179 tasklet_kill(&qp->rxc_db_work); in ntb_transport_free_queue()
2181 cancel_delayed_work_sync(&qp->link_work); in ntb_transport_free_queue()
2183 qp->cb_data = NULL; in ntb_transport_free_queue()
2184 qp->rx_handler = NULL; in ntb_transport_free_queue()
2185 qp->tx_handler = NULL; in ntb_transport_free_queue()
2186 qp->event_handler = NULL; in ntb_transport_free_queue()
2188 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) in ntb_transport_free_queue()
2191 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) { in ntb_transport_free_queue()
2192 dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n"); in ntb_transport_free_queue()
2196 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) { in ntb_transport_free_queue()
2197 dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n"); in ntb_transport_free_queue()
2201 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) in ntb_transport_free_queue()
2204 qp->transport->qp_bitmap_free |= qp_bit; in ntb_transport_free_queue()
2206 dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num); in ntb_transport_free_queue()
2211 * ntb_transport_rx_remove - Dequeues enqueued rx packet
2225 if (!qp || qp->client_ready) in ntb_transport_rx_remove()
2228 entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q); in ntb_transport_rx_remove()
2232 buf = entry->cb_data; in ntb_transport_rx_remove()
2233 *len = entry->len; in ntb_transport_rx_remove()
2235 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q); in ntb_transport_rx_remove()
2242 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
2243 * @qp: NTB transport layer queue the entry is to be enqueued on
2244 * @cb: per buffer pointer for callback function to use
2245 * @data: pointer to data buffer that incoming packets will be copied into
2246 * @len: length of the data buffer
2248 Enqueue a new receive buffer onto the transport queue into which an NTB
2251 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2259 return -EINVAL; in ntb_transport_rx_enqueue()
2261 entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q); in ntb_transport_rx_enqueue()
2263 return -ENOMEM; in ntb_transport_rx_enqueue()
2265 entry->cb_data = cb; in ntb_transport_rx_enqueue()
2266 entry->buf = data; in ntb_transport_rx_enqueue()
2267 entry->len = len; in ntb_transport_rx_enqueue()
2268 entry->flags = 0; in ntb_transport_rx_enqueue()
2269 entry->retries = 0; in ntb_transport_rx_enqueue()
2270 entry->errors = 0; in ntb_transport_rx_enqueue()
2271 entry->rx_index = 0; in ntb_transport_rx_enqueue()
2273 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q); in ntb_transport_rx_enqueue()
2275 if (qp->active) in ntb_transport_rx_enqueue()
2276 tasklet_schedule(&qp->rxc_db_work); in ntb_transport_rx_enqueue()
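
Receive buffers must be posted by the client before the peer can deliver anything into them; clients typically pre-fill the pending queue and then re-post each buffer from their rx_handler. A hedged sketch (the buffer count and the use of plain kmalloc() buffers are arbitrary choices for illustration):

	#define MY_RX_BUFS	64	/* arbitrary depth for this sketch */

	static int my_post_rx_buffers(struct ntb_transport_qp *qp)
	{
		unsigned int len = ntb_transport_max_size(qp);
		void *buf;
		int i, rc;

		for (i = 0; i < MY_RX_BUFS; i++) {
			buf = kmalloc(len, GFP_KERNEL);
			if (!buf)
				return -ENOMEM;

			/* The cb pointer is handed back untouched to the rx_handler. */
			rc = ntb_transport_rx_enqueue(qp, buf, buf, len);
			if (rc) {
				kfree(buf);
				return rc;
			}
		}

		return 0;
	}
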
2283 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
2284 * @qp: NTB transport layer queue the entry is to be enqueued on
2285 * @cb: per buffer pointer for callback function to use
2286 * @data: pointer to data buffer that will be sent
2287 * @len: length of the data buffer
2289 Enqueue a new transmit buffer onto the transport queue from which an NTB
2293 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2302 return -EINVAL; in ntb_transport_tx_enqueue()
2305 if (!qp->link_is_up) in ntb_transport_tx_enqueue()
2308 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); in ntb_transport_tx_enqueue()
2310 qp->tx_err_no_buf++; in ntb_transport_tx_enqueue()
2311 return -EBUSY; in ntb_transport_tx_enqueue()
2314 entry->cb_data = cb; in ntb_transport_tx_enqueue()
2315 entry->buf = data; in ntb_transport_tx_enqueue()
2316 entry->len = len; in ntb_transport_tx_enqueue()
2317 entry->flags = 0; in ntb_transport_tx_enqueue()
2318 entry->errors = 0; in ntb_transport_tx_enqueue()
2319 entry->retries = 0; in ntb_transport_tx_enqueue()
2320 entry->tx_index = 0; in ntb_transport_tx_enqueue()
2324 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, in ntb_transport_tx_enqueue()
2325 &qp->tx_free_q); in ntb_transport_tx_enqueue()
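
On the transmit side the client hands a buffer to the transport and gets it back in its tx_handler once the CPU or DMA copy has completed and the peer's doorbell has been rung; -EBUSY means the TX ring is currently full. A sketch, reusing the hypothetical my_ctx context from the probe() example:

	static int my_send(struct my_ctx *ctx, void *buf, unsigned int len)
	{
		/* -EINVAL if the link is down or len is zero, -EBUSY if no free entry */
		return ntb_transport_tx_enqueue(ctx->qp, buf, buf, len);
	}

	/* tx_handler: the transport is done with the buffer; 'data' is the cb
	 * pointer that was passed to ntb_transport_tx_enqueue().
	 */
	static void my_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
				  void *data, int len)
	{
		kfree(data);
	}
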
2332 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
2333 * @qp: NTB transport layer queue to be enabled
2335 * Notify NTB transport layer of client readiness to use queue
2342 qp->client_ready = true; in ntb_transport_link_up()
2344 if (qp->transport->link_is_up) in ntb_transport_link_up()
2345 schedule_delayed_work(&qp->link_work, 0); in ntb_transport_link_up()
2350 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
2351 * @qp: NTB transport layer queue to be disabled
2353 * Notify NTB transport layer of client's desire to no longer receive data on
2364 qp->client_ready = false; in ntb_transport_link_down()
2366 val = ntb_spad_read(qp->ndev, QP_LINKS); in ntb_transport_link_down()
2368 ntb_peer_spad_write(qp->ndev, PIDX, QP_LINKS, val & ~BIT(qp->qp_num)); in ntb_transport_link_down()
2370 if (qp->link_is_up) in ntb_transport_link_down()
2373 cancel_delayed_work_sync(&qp->link_work); in ntb_transport_link_down()
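
ntb_transport_link_up() and ntb_transport_link_down() bracket the client's use of the queue: link up is signalled once receive buffers are posted and the client is ready (the event_handler then reports when the QP link actually comes up), and link down is signalled before the queue is torn down. A condensed bring-up/tear-down sketch reusing the hypothetical helpers above:

	static int my_start(struct my_ctx *ctx)
	{
		int rc;

		rc = my_post_rx_buffers(ctx->qp);	/* post RX buffers first    */
		if (rc)
			return rc;

		ntb_transport_link_up(ctx->qp);		/* declare the client ready */
		return 0;
	}

	static void my_stop(struct my_ctx *ctx)
	{
		ntb_transport_link_down(ctx->qp);	/* tell the peer to stop        */
		ntb_transport_free_queue(ctx->qp);	/* release DMA channels/entries */
	}
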
2378 * ntb_transport_link_query - Query transport link state
2379 * @qp: NTB transport layer queue to be queried
2390 return qp->link_is_up; in ntb_transport_link_query()
2395 * ntb_transport_qp_num - Query the qp number
2396 * @qp: NTB transport layer queue to be queried
2407 return qp->qp_num; in ntb_transport_qp_num()
2412 * ntb_transport_max_size - Query the max payload size of a qp
2413 * @qp: NTB transport layer queue to be queried
2428 rx_chan = qp->rx_dma_chan; in ntb_transport_max_size()
2429 tx_chan = qp->tx_dma_chan; in ntb_transport_max_size()
2431 copy_align = max(rx_chan ? rx_chan->device->copy_align : 0, in ntb_transport_max_size()
2432 tx_chan ? tx_chan->device->copy_align : 0); in ntb_transport_max_size()
2435 max_size = qp->tx_max_frame - sizeof(struct ntb_payload_header); in ntb_transport_max_size()
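
Clients size their buffers from this value; an Ethernet-style client such as ntb_netdev derives its MTU from it. A brief sketch (the ETH_HLEN adjustment is only relevant for such Ethernet-framed clients):

	unsigned int max_payload = ntb_transport_max_size(ctx->qp);
	unsigned int mtu = max_payload - ETH_HLEN;	/* Ethernet-style client */
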
2444 unsigned int head = qp->tx_index; in ntb_transport_tx_free_entry()
2445 unsigned int tail = qp->remote_rx_info->entry; in ntb_transport_tx_free_entry()
2447 return tail >= head ? tail - head : qp->tx_max_entry + tail - head; in ntb_transport_tx_free_entry()
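
The computation above is the standard circular-ring distance between the local producer index (head, qp->tx_index) and the consumer index the peer writes back (tail, remote_rx_info->entry). Worked numbers, purely illustrative:

	/*
	 * Illustrative example with tx_max_entry = 8:
	 *   head = 2, tail = 6  ->  6 - 2      = 4 entries free
	 *   head = 6, tail = 2  ->  8 + 2 - 6  = 4 entries free
	 *   head == tail        ->  0 entries free (ring is full)
	 */
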
2458 if (ntb_db_read(nt->ndev) & nt->msi_db_mask) { in ntb_transport_doorbell_callback()
2460 ntb_db_clear(nt->ndev, nt->msi_db_mask); in ntb_transport_doorbell_callback()
2463 db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free & in ntb_transport_doorbell_callback()
2464 ntb_db_vector_mask(nt->ndev, vector)); in ntb_transport_doorbell_callback()
2468 qp = &nt->qp_vec[qp_num]; in ntb_transport_doorbell_callback()
2470 if (qp->active) in ntb_transport_doorbell_callback()
2471 tasklet_schedule(&qp->rxc_db_work); in ntb_transport_doorbell_callback()