Lines Matching +full:rpm +full:- +full:stats

1 // SPDX-License-Identifier: GPL-2.0
24 #define DRV_NAME "Marvell-CGX/RPM"
25 #define DRV_STRING "Marvell CGX/RPM Driver"
80 return (cgx->pdev->device == PCI_DEVID_CN10K_RPM) || in is_dev_rpm()
81 (cgx->pdev->device == PCI_DEVID_CN10KB_RPM); in is_dev_rpm()
86 if (!cgx || lmac_id < 0 || lmac_id >= cgx->max_lmac_per_mac) in is_lmac_valid()
88 return test_bit(lmac_id, &cgx->lmac_bmap); in is_lmac_valid()
98 for_each_set_bit(tmp, &cgx->lmac_bmap, cgx->max_lmac_per_mac) { in get_sequence_id_of_lmac()
112 return ((struct cgx *)cgxd)->mac_ops; in get_mac_ops()
117 return ((struct cgx *)cgxd)->fifo_len; in cgx_get_fifo_len()
122 writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) + in cgx_write()
128 return readq(cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) + in cgx_read()
134 if (!cgx || lmac_id >= cgx->max_lmac_per_mac) in lmac_pdata()
137 return cgx->lmac_idmap[lmac_id]; in lmac_pdata()
143 int idmax = -ENODEV; in cgx_get_cgxcnt_max()
146 if (cgx_dev->cgx_id > idmax) in cgx_get_cgxcnt_max()
147 idmax = cgx_dev->cgx_id; in cgx_get_cgxcnt_max()
160 return -ENODEV; in cgx_get_lmac_cnt()
162 return cgx->lmac_count; in cgx_get_lmac_cnt()
170 if (cgx_dev->cgx_id == cgx_id) in cgx_get_pdata()
202 return -EINVAL; in cgx_get_cgxid()
204 return cgx->cgx_id; in cgx_get_cgxid()
223 if (cgx->pdev->subsystem_device != PCI_SUBSYS_DEVID_98XX) in cgx_get_nix_resetbit()
226 first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac); in cgx_get_nix_resetbit()
227 p2x = cgx_lmac_get_p2x(cgx->cgx_id, first_lmac); in cgx_get_nix_resetbit()
246 return -ENODEV; in cgx_get_link_info()
248 *linfo = lmac->link_info; in cgx_get_link_info()
261 return -ENODEV; in cgx_lmac_addr_set()
264 mac_ops = cgx_dev->mac_ops; in cgx_lmac_addr_set()
273 index = id * lmac->mac_to_index_bmap.max; in cgx_lmac_addr_set()
296 mac_ops = cgx->mac_ops; in cgx_read_dmac_ctrl()
310 mac_ops = cgx->mac_ops; in cgx_read_dmac_entry()
324 return -ENODEV; in cgx_lmac_addr_add()
326 mac_ops = cgx_dev->mac_ops; in cgx_lmac_addr_add()
328 idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap); in cgx_lmac_addr_add()
334 index = id * lmac->mac_to_index_bmap.max + idx; in cgx_lmac_addr_add()
347 lmac->mcast_filters_count++; in cgx_lmac_addr_add()
348 } else if (!lmac->mcast_filters_count) { in cgx_lmac_addr_add()
366 return -ENODEV; in cgx_lmac_addr_reset()
368 mac_ops = cgx_dev->mac_ops; in cgx_lmac_addr_reset()
372 set_bit(0, lmac->mac_to_index_bmap.bmap); in cgx_lmac_addr_reset()
376 index = id * lmac->mac_to_index_bmap.max + index; in cgx_lmac_addr_reset()
402 return -ENODEV; in cgx_lmac_addr_update()
404 mac_ops = cgx_dev->mac_ops; in cgx_lmac_addr_update()
406 if (index >= lmac->mac_to_index_bmap.max) in cgx_lmac_addr_update()
407 return -EINVAL; in cgx_lmac_addr_update()
410 if (!test_bit(index, lmac->mac_to_index_bmap.bmap)) in cgx_lmac_addr_update()
411 return -EINVAL; in cgx_lmac_addr_update()
415 index = id * lmac->mac_to_index_bmap.max + index; in cgx_lmac_addr_update()
435 return -ENODEV; in cgx_lmac_addr_del()
437 mac_ops = cgx_dev->mac_ops; in cgx_lmac_addr_del()
439 if (index >= lmac->mac_to_index_bmap.max) in cgx_lmac_addr_del()
440 return -EINVAL; in cgx_lmac_addr_del()
446 rvu_free_rsrc(&lmac->mac_to_index_bmap, index); in cgx_lmac_addr_del()
450 index = id * lmac->mac_to_index_bmap.max + index; in cgx_lmac_addr_del()
457 lmac->mcast_filters_count--; in cgx_lmac_addr_del()
459 if (!lmac->mcast_filters_count) { in cgx_lmac_addr_del()
477 return lmac->mac_to_index_bmap.max; in cgx_lmac_addr_max_entries_get()
491 mac_ops = cgx_dev->mac_ops; in cgx_lmac_addr_get()
495 index = id * lmac->mac_to_index_bmap.max; in cgx_lmac_addr_get()
506 return -ENODEV; in cgx_set_pkind()
508 cgx_write(cgx, lmac_id, cgx->mac_ops->rxid_map_offset, (pkind & 0x3F)); in cgx_set_pkind()
527 fifo_len = cgx->fifo_len; in cgx_get_lmac_fifo_len()
528 num_lmacs = cgx->mac_ops->get_nr_lmacs(cgx); in cgx_get_lmac_fifo_len()
555 return -ENODEV; in cgx_lmac_internal_loopback()
558 if (lmac->lmac_type == LMAC_MODE_SGMII || in cgx_lmac_internal_loopback()
559 lmac->lmac_type == LMAC_MODE_QSGMII) { in cgx_lmac_internal_loopback()
590 max_dmac = lmac->mac_to_index_bmap.max; in cgx_lmac_promisc_config()
593 mac_ops = cgx->mac_ops; in cgx_lmac_promisc_config()
639 return -ENODEV; in cgx_lmac_get_pause_frm_status()
666 if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max)) in cgx_lmac_enadis_rx_pause_fwding()
709 return -ENODEV; in cgx_get_rx_stats()
719 return -ENODEV; in cgx_get_tx_stats()
726 return ((struct cgx *)cgxd)->hw_features; in cgx_features_get()
735 return -ENODEV; in cgx_stats_reset()
739 /* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */ in cgx_stats_reset()
755 if (!linfo->fec) in cgx_set_fec_stats_count()
758 switch (linfo->lmac_type_id) { in cgx_set_fec_stats_count()
772 if (linfo->fec == OTX2_FEC_BASER) in cgx_set_fec_stats_count()
783 int stats, fec_stats_count = 0; in cgx_get_fec_stats() local
788 return -ENODEV; in cgx_get_fec_stats()
790 if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE) in cgx_get_fec_stats()
794 cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info); in cgx_get_fec_stats()
795 if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) { in cgx_get_fec_stats()
802 for (stats = 0; stats < fec_stats_count; stats++) { in cgx_get_fec_stats()
803 rsp->fec_corr_blks += in cgx_get_fec_stats()
804 cgx_read(cgx, lmac_id, corr_reg + (stats * 8)); in cgx_get_fec_stats()
805 rsp->fec_uncorr_blks += in cgx_get_fec_stats()
806 cgx_read(cgx, lmac_id, uncorr_reg + (stats * 8)); in cgx_get_fec_stats()
817 return -ENODEV; in cgx_lmac_rx_tx_enable()
834 return -ENODEV; in cgx_lmac_tx_enable()
858 return -ENODEV; in cgx_lmac_enadis_pause_frm()
946 return -ENODEV; in verify_lmac_fc_cfg()
949 clear_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap); in verify_lmac_fc_cfg()
951 set_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap); in verify_lmac_fc_cfg()
954 clear_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap); in verify_lmac_fc_cfg()
956 set_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap); in verify_lmac_fc_cfg()
959 if (!rx_pause && bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max)) { in verify_lmac_fc_cfg()
960 dev_warn(&cgx->pdev->dev, in verify_lmac_fc_cfg()
962 return -EPERM; in verify_lmac_fc_cfg()
965 if (!tx_pause && bitmap_weight(lmac->tx_fc_pfvf_bmap.bmap, lmac->tx_fc_pfvf_bmap.max)) { in verify_lmac_fc_cfg()
966 dev_warn(&cgx->pdev->dev, in verify_lmac_fc_cfg()
968 return -EPERM; in verify_lmac_fc_cfg()
981 return -ENODEV; in cgx_lmac_pfc_config()
1011 cfg = cgx_lmac_addr_get(cgx->cgx_id, lmac_id); in cgx_lmac_pfc_config()
1024 return -ENODEV; in cgx_lmac_get_pfc_frm_cfg()
1066 struct cgx *cgx = lmac->cgx; in cgx_fwi_cmd_send()
1072 err = mutex_lock_interruptible(&lmac->cmd_lock); in cgx_fwi_cmd_send()
1077 cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG); in cgx_fwi_cmd_send()
1079 err = -EBUSY; in cgx_fwi_cmd_send()
1087 lmac->cmd_pend = true; in cgx_fwi_cmd_send()
1090 cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req); in cgx_fwi_cmd_send()
1093 if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend, in cgx_fwi_cmd_send()
1095 dev = &cgx->pdev->dev; in cgx_fwi_cmd_send()
1097 cgx->cgx_id, lmac->lmac_id, FIELD_GET(CMDREG_ID, req)); in cgx_fwi_cmd_send()
1104 *resp = lmac->resp; in cgx_fwi_cmd_send()
1107 mutex_unlock(&lmac->cmd_lock); in cgx_fwi_cmd_send()
1119 return -ENODEV; in cgx_fwi_cmd_generic()
1126 return -EIO; in cgx_fwi_cmd_generic()
1173 if (args->duplex == DUPLEX_UNKNOWN) in set_mod_args()
1174 args->duplex = duplex; in set_mod_args()
1175 if (args->speed == SPEED_UNKNOWN) in set_mod_args()
1176 args->speed = speed; in set_mod_args()
1177 if (args->an == AUTONEG_UNKNOWN) in set_mod_args()
1178 args->an = autoneg; in set_mod_args()
1179 args->mode = mode; in set_mod_args()
1180 args->ports = 0; in set_mod_args()
1275 linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat); in link_status_user_format()
1276 linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat); in link_status_user_format()
1277 linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)]; in link_status_user_format()
1278 linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat); in link_status_user_format()
1279 linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat); in link_status_user_format()
1280 linfo->lmac_type_id = FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, lstat); in link_status_user_format()
1282 if (linfo->lmac_type_id >= LMAC_MODE_MAX) { in link_status_user_format()
1283 dev_err(&cgx->pdev->dev, "Unknown lmac_type_id %d reported by firmware on cgx port%d:%d", in link_status_user_format()
1284 linfo->lmac_type_id, cgx->cgx_id, lmac_id); in link_status_user_format()
1285 strscpy(linfo->lmac_type, "Unknown", sizeof(linfo->lmac_type)); in link_status_user_format()
1289 strscpy(linfo->lmac_type, cgx_lmactype_string[linfo->lmac_type_id], in link_status_user_format()
1290 sizeof(linfo->lmac_type)); in link_status_user_format()
1298 struct cgx *cgx = lmac->cgx; in cgx_link_change_handler()
1303 dev = &cgx->pdev->dev; in cgx_link_change_handler()
1305 link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id); in cgx_link_change_handler()
1308 event.cgx_id = cgx->cgx_id; in cgx_link_change_handler()
1309 event.lmac_id = lmac->lmac_id; in cgx_link_change_handler()
1312 lmac->link_info = event.link_uinfo; in cgx_link_change_handler()
1313 linfo = &lmac->link_info; in cgx_link_change_handler()
1319 spin_lock(&lmac->event_cb_lock); in cgx_link_change_handler()
1321 if (!lmac->event_cb.notify_link_chg) { in cgx_link_change_handler()
1323 cgx->cgx_id, lmac->lmac_id); in cgx_link_change_handler()
1326 cgx->cgx_id, lmac->lmac_id, err_type); in cgx_link_change_handler()
1329 cgx->cgx_id, lmac->lmac_id, in cgx_link_change_handler()
1330 linfo->link_up ? "UP" : "DOWN", linfo->speed); in cgx_link_change_handler()
1334 if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data)) in cgx_link_change_handler()
1337 spin_unlock(&lmac->event_cb_lock); in cgx_link_change_handler()
1367 cgx = lmac->cgx; in cgx_fwi_event_handler()
1369 /* Clear SW_INT for RPM and CMR_INT for CGX */ in cgx_fwi_event_handler()
1370 offset = cgx->mac_ops->int_register; in cgx_fwi_event_handler()
1371 clear_bit = cgx->mac_ops->int_ena_bit; in cgx_fwi_event_handler()
1373 event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG); in cgx_fwi_event_handler()
1383 lmac->resp = event; in cgx_fwi_event_handler()
1394 lmac->cmd_pend = false; in cgx_fwi_event_handler()
1395 wake_up(&lmac->wq_cmd_cmplt); in cgx_fwi_event_handler()
1407 cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0); in cgx_fwi_event_handler()
1408 cgx_write(lmac->cgx, lmac->lmac_id, offset, clear_bit); in cgx_fwi_event_handler()
1423 return -ENODEV; in cgx_lmac_evh_register()
1425 lmac->event_cb = *cb; in cgx_lmac_evh_register()
1438 return -ENODEV; in cgx_lmac_evh_unregister()
1440 spin_lock_irqsave(&lmac->event_cb_lock, flags); in cgx_lmac_evh_unregister()
1441 lmac->event_cb.notify_link_chg = NULL; in cgx_lmac_evh_unregister()
1442 lmac->event_cb.data = NULL; in cgx_lmac_evh_unregister()
1443 spin_unlock_irqrestore(&lmac->event_cb_lock, flags); in cgx_lmac_evh_unregister()
1457 return -ENXIO; in cgx_get_fwdata_base()
1459 first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac); in cgx_get_fwdata_base()
1475 return -ENODEV; in cgx_set_link_mode()
1480 return -EINVAL; in cgx_set_link_mode()
1500 return -ENXIO; in cgx_set_fec()
1508 cgx->lmac_idmap[lmac_id]->link_info.fec = in cgx_set_fec()
1510 return cgx->lmac_idmap[lmac_id]->link_info.fec; in cgx_set_fec()
1519 return -ENODEV; in cgx_get_phy_fec_stats()
1548 int first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac); in cgx_fwi_read_version()
1557 struct device *dev = &cgx->pdev->dev; in cgx_lmac_verify_fwi_version()
1562 if (!cgx->lmac_count) in cgx_lmac_verify_fwi_version()
1574 return -EIO; in cgx_lmac_verify_fwi_version()
1582 struct device *dev = &cgx->pdev->dev; in cgx_lmac_linkup_work()
1586 for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) { in cgx_lmac_linkup_work()
1590 cgx->cgx_id, i); in cgx_lmac_linkup_work()
1599 return -ENODEV; in cgx_lmac_linkup_start()
1601 queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work); in cgx_lmac_linkup_start()
1612 return -ENODEV; in cgx_lmac_reset()
1626 struct mac_ops *mac_ops = cgx->mac_ops; in cgx_configure_interrupt()
1631 irq = pci_irq_vector(cgx->pdev, mac_ops->lmac_fwi + in cgx_configure_interrupt()
1632 cnt * mac_ops->irq_offset); in cgx_configure_interrupt()
1633 offset = mac_ops->int_set_reg; in cgx_configure_interrupt()
1634 ena_bit = mac_ops->int_ena_bit; in cgx_configure_interrupt()
1641 err = request_irq(irq, cgx_fwi_event_handler, 0, lmac->name, lmac); in cgx_configure_interrupt()
1646 cgx_write(cgx, lmac->lmac_id, offset, ena_bit); in cgx_configure_interrupt()
1661 return cgx->lmac_idmap[lmac_index]->lmac_id; in cgx_get_lmacid()
1668 return cgx->lmac_bmap; in cgx_get_lmac_bmap()
1680 if (cgx->mac_ops->non_contiguous_serdes_lane) { in cgx_lmac_init()
1689 if (cgx->lmac_count > cgx->max_lmac_per_mac) in cgx_lmac_init()
1690 cgx->lmac_count = cgx->max_lmac_per_mac; in cgx_lmac_init()
1692 for (i = 0; i < cgx->lmac_count; i++) { in cgx_lmac_init()
1695 return -ENOMEM; in cgx_lmac_init()
1696 lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL); in cgx_lmac_init()
1697 if (!lmac->name) { in cgx_lmac_init()
1698 err = -ENOMEM; in cgx_lmac_init()
1701 sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i); in cgx_lmac_init()
1702 if (cgx->mac_ops->non_contiguous_serdes_lane) { in cgx_lmac_init()
1703 lmac->lmac_id = __ffs64(lmac_list); in cgx_lmac_init()
1704 lmac_list &= ~BIT_ULL(lmac->lmac_id); in cgx_lmac_init()
1706 lmac->lmac_id = i; in cgx_lmac_init()
1709 lmac->cgx = cgx; in cgx_lmac_init()
1710 lmac->mac_to_index_bmap.max = in cgx_lmac_init()
1711 cgx->mac_ops->dmac_filter_count / in cgx_lmac_init()
1712 cgx->lmac_count; in cgx_lmac_init()
1714 err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap); in cgx_lmac_init()
1719 set_bit(0, lmac->mac_to_index_bmap.bmap); in cgx_lmac_init()
1721 lmac->rx_fc_pfvf_bmap.max = 128; in cgx_lmac_init()
1722 err = rvu_alloc_bitmap(&lmac->rx_fc_pfvf_bmap); in cgx_lmac_init()
1726 lmac->tx_fc_pfvf_bmap.max = 128; in cgx_lmac_init()
1727 err = rvu_alloc_bitmap(&lmac->tx_fc_pfvf_bmap); in cgx_lmac_init()
1731 init_waitqueue_head(&lmac->wq_cmd_cmplt); in cgx_lmac_init()
1732 mutex_init(&lmac->cmd_lock); in cgx_lmac_init()
1733 spin_lock_init(&lmac->event_cb_lock); in cgx_lmac_init()
1734 err = cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, false); in cgx_lmac_init()
1739 cgx->lmac_idmap[lmac->lmac_id] = lmac; in cgx_lmac_init()
1740 set_bit(lmac->lmac_id, &cgx->lmac_bmap); in cgx_lmac_init()
1741 cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true); in cgx_lmac_init()
1742 lmac->lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac->lmac_id); in cgx_lmac_init()
1746 cgx->mac_ops->mac_x2p_reset(cgx, true); in cgx_lmac_init()
1750 rvu_free_bitmap(&lmac->tx_fc_pfvf_bmap); in cgx_lmac_init()
1752 rvu_free_bitmap(&lmac->rx_fc_pfvf_bmap); in cgx_lmac_init()
1754 rvu_free_bitmap(&lmac->mac_to_index_bmap); in cgx_lmac_init()
1756 kfree(lmac->name); in cgx_lmac_init()
1767 if (cgx->cgx_cmd_workq) { in cgx_lmac_exit()
1768 destroy_workqueue(cgx->cgx_cmd_workq); in cgx_lmac_exit()
1769 cgx->cgx_cmd_workq = NULL; in cgx_lmac_exit()
1773 for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) { in cgx_lmac_exit()
1774 lmac = cgx->lmac_idmap[i]; in cgx_lmac_exit()
1777 cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false); in cgx_lmac_exit()
1778 cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true); in cgx_lmac_exit()
1779 kfree(lmac->mac_to_index_bmap.bmap); in cgx_lmac_exit()
1780 kfree(lmac->name); in cgx_lmac_exit()
1792 cgx->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg); in cgx_populate_features()
1793 cgx->max_lmac_per_mac = FIELD_GET(CGX_CONST_MAX_LMACS, cfg); in cgx_populate_features()
1796 cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM | in cgx_populate_features()
1799 cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_HIGIG2 | in cgx_populate_features()
1805 if (cgx->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10KB_RPM || in cgx_get_rxid_mapoffset()
1819 for_each_set_bit(lmac_id, &cgx->lmac_bmap, cgx->max_lmac_per_mac) in cgx_x2p_reset()
1820 cgx->mac_ops->mac_enadis_rx(cgx, lmac_id, false); in cgx_x2p_reset()
1840 return -ENODEV; in cgx_enadis_rx()
1888 struct device *dev = &pdev->dev; in cgx_probe()
1894 return -ENOMEM; in cgx_probe()
1895 cgx->pdev = pdev; in cgx_probe()
1901 cgx->mac_ops = rpm_get_mac_ops(cgx); in cgx_probe()
1903 cgx->mac_ops = &cgx_mac_ops; in cgx_probe()
1905 cgx->mac_ops->rxid_map_offset = cgx_get_rxid_mapoffset(cgx); in cgx_probe()
1921 cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); in cgx_probe()
1922 if (!cgx->reg_base) { in cgx_probe()
1924 err = -ENOMEM; in cgx_probe()
1928 cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx); in cgx_probe()
1929 if (!cgx->lmac_count) { in cgx_probe()
1930 dev_notice(dev, "CGX %d LMAC count is zero, skipping probe\n", cgx->cgx_id); in cgx_probe()
1931 err = -EOPNOTSUPP; in cgx_probe()
1935 nvec = pci_msix_vec_count(cgx->pdev); in cgx_probe()
1943 cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) in cgx_probe()
1947 INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work); in cgx_probe()
1948 cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0); in cgx_probe()
1949 if (!cgx->cgx_cmd_workq) { in cgx_probe()
1951 err = -ENOMEM; in cgx_probe()
1955 list_add(&cgx->cgx_list, &cgx_list); in cgx_probe()
1960 mutex_init(&cgx->lock); in cgx_probe()
1970 list_del(&cgx->cgx_list); in cgx_probe()
1987 list_del(&cgx->cgx_list); in cgx_remove()
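
The matched lines above all come from the Marvell CGX/RPM MAC driver (DRV_NAME "Marvell-CGX/RPM"). Two arithmetic patterns recur in the hits: the per-LMAC register addressing used by cgx_write()/cgx_read() at lines 122/128 (reg_base + (lmac << lmac_offset) + offset), and the FEC statistics loop at lines 802-806, which sums corrected and uncorrected block counters from consecutive 64-bit registers. The standalone C sketch below only models that arithmetic; the base address, register offsets, window shift, counter count and fake_readq() are hypothetical stand-ins, not the driver's constants or MMIO accessors.

    /*
     * Illustrative, self-contained sketch -- NOT driver code.  It models
     * two patterns visible in the matched lines above:
     *   - per-LMAC register addressing, reg_base + (lmac << lmac_offset) + offset
     *     (see cgx_write()/cgx_read() at lines 122/128), and
     *   - FEC block-counter accumulation over consecutive 64-bit registers
     *     (see the loop at lines 802-806).
     * All constants below and fake_readq() are assumed values for the example.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define EXAMPLE_LMAC_OFFSET   18          /* assumed window shift per LMAC     */
    #define EXAMPLE_CORR_REG      0x10700ULL  /* assumed first corrected counter   */
    #define EXAMPLE_UNCORR_REG    0x10800ULL  /* assumed first uncorrected counter */

    /* Each LMAC owns a window of the MAC's register space, selected by shifting
     * the LMAC id by a MAC-type-specific bit offset. */
    static uint64_t lmac_reg_addr(uint64_t reg_base, unsigned int lmac,
                                  unsigned int lmac_offset, uint64_t offset)
    {
            return reg_base + ((uint64_t)lmac << lmac_offset) + offset;
    }

    /* Stand-in for a 64-bit MMIO read; it just fabricates deterministic data. */
    static uint64_t fake_readq(uint64_t addr)
    {
            return addr & 0xff;
    }

    int main(void)
    {
            uint64_t reg_base = 0x87e0e0000000ULL;  /* made-up BAR address      */
            unsigned int lmac_id = 2;
            int stats, fec_stats_count = 2;         /* made-up counter count    */
            uint64_t corr = 0, uncorr = 0;

            /* Successive counters sit 8 bytes apart, so the loop walks them at
             * an 8-byte stride and sums the per-register values, mirroring how
             * fec_corr_blks/fec_uncorr_blks are accumulated in the listing. */
            for (stats = 0; stats < fec_stats_count; stats++) {
                    corr   += fake_readq(lmac_reg_addr(reg_base, lmac_id,
                                                       EXAMPLE_LMAC_OFFSET,
                                                       EXAMPLE_CORR_REG + stats * 8));
                    uncorr += fake_readq(lmac_reg_addr(reg_base, lmac_id,
                                                       EXAMPLE_LMAC_OFFSET,
                                                       EXAMPLE_UNCORR_REG + stats * 8));
            }

            printf("corrected=%llu uncorrected=%llu\n",
                   (unsigned long long)corr, (unsigned long long)uncorr);
            return 0;
    }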