Lines Matching +full:dp +full:- +full:phy1

3  * Copyright (c) 2007-2013 Broadcom Corporation
36 #include <linux/dma-mapping.h>
83 #define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
84 #define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
85 #define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
86 #define FW_FILE_NAME_E1_V15 "bnx2x/bnx2x-e1-" FW_FILE_VERSION_V15 ".fw"
87 #define FW_FILE_NAME_E1H_V15 "bnx2x/bnx2x-e1h-" FW_FILE_VERSION_V15 ".fw"
88 #define FW_FILE_NAME_E2_V15 "bnx2x/bnx2x-e2-" FW_FILE_VERSION_V15 ".fw"
117 MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
124 static int mrrs = -1;
379 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr); in bnx2x_reg_wr_ind()
380 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val); in bnx2x_reg_wr_ind()
381 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, in bnx2x_reg_wr_ind()
389 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr); in bnx2x_reg_rd_ind()
390 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val); in bnx2x_reg_rd_ind()
391 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, in bnx2x_reg_rd_ind()
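
Lines 379-381 and 389-391 show the indirect GRC access pattern used by bnx2x_reg_wr_ind()/bnx2x_reg_rd_ind(): the target GRC offset is written to PCICFG_GRC_ADDRESS, the payload moves through PCICFG_GRC_DATA, and the address register is written once more at the end (the restored value is cut off in the matches above). The sketch below mirrors that three-step sequence; the helper names and the RESTORE_OFFSET placeholder are illustrative, not the driver's symbols.

#include <linux/pci.h>
#include <linux/types.h>

#define RESTORE_OFFSET 0x00	/* placeholder for the value the driver restores */

/* write one dword at grc_addr through the PCI config window */
static void grc_wr_ind(struct pci_dev *pdev, int addr_reg, int data_reg,
		       u32 grc_addr, u32 val)
{
	pci_write_config_dword(pdev, addr_reg, grc_addr);	/* select target */
	pci_write_config_dword(pdev, data_reg, val);		/* move payload */
	pci_write_config_dword(pdev, addr_reg, RESTORE_OFFSET);	/* restore window */
}

/* read one dword at grc_addr through the PCI config window */
static u32 grc_rd_ind(struct pci_dev *pdev, int addr_reg, int data_reg,
		      u32 grc_addr)
{
	u32 val;

	pci_write_config_dword(pdev, addr_reg, grc_addr);
	pci_read_config_dword(pdev, data_reg, &val);
	pci_write_config_dword(pdev, addr_reg, RESTORE_OFFSET);
	return val;
}
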
406 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC; in bnx2x_dp_dmae()
409 switch (dmae->opcode & DMAE_COMMAND_DST) { in bnx2x_dp_dmae()
412 DP(msglvl, "DMAE: opcode 0x%08x\n" in bnx2x_dp_dmae()
415 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, in bnx2x_dp_dmae()
416 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, in bnx2x_dp_dmae()
417 dmae->comp_addr_hi, dmae->comp_addr_lo, in bnx2x_dp_dmae()
418 dmae->comp_val); in bnx2x_dp_dmae()
420 DP(msglvl, "DMAE: opcode 0x%08x\n" in bnx2x_dp_dmae()
423 dmae->opcode, dmae->src_addr_lo >> 2, in bnx2x_dp_dmae()
424 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, in bnx2x_dp_dmae()
425 dmae->comp_addr_hi, dmae->comp_addr_lo, in bnx2x_dp_dmae()
426 dmae->comp_val); in bnx2x_dp_dmae()
430 DP(msglvl, "DMAE: opcode 0x%08x\n" in bnx2x_dp_dmae()
433 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, in bnx2x_dp_dmae()
434 dmae->len, dmae->dst_addr_lo >> 2, in bnx2x_dp_dmae()
435 dmae->comp_addr_hi, dmae->comp_addr_lo, in bnx2x_dp_dmae()
436 dmae->comp_val); in bnx2x_dp_dmae()
438 DP(msglvl, "DMAE: opcode 0x%08x\n" in bnx2x_dp_dmae()
441 dmae->opcode, dmae->src_addr_lo >> 2, in bnx2x_dp_dmae()
442 dmae->len, dmae->dst_addr_lo >> 2, in bnx2x_dp_dmae()
443 dmae->comp_addr_hi, dmae->comp_addr_lo, in bnx2x_dp_dmae()
444 dmae->comp_val); in bnx2x_dp_dmae()
448 DP(msglvl, "DMAE: opcode 0x%08x\n" in bnx2x_dp_dmae()
451 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, in bnx2x_dp_dmae()
452 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, in bnx2x_dp_dmae()
453 dmae->comp_val); in bnx2x_dp_dmae()
455 DP(msglvl, "DMAE: opcode 0x%08x\n" in bnx2x_dp_dmae()
458 dmae->opcode, dmae->src_addr_lo >> 2, in bnx2x_dp_dmae()
459 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, in bnx2x_dp_dmae()
460 dmae->comp_val); in bnx2x_dp_dmae()
465 DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n", in bnx2x_dp_dmae()
525 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type, in bnx2x_prep_dmae_with_comp()
529 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp)); in bnx2x_prep_dmae_with_comp()
530 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp)); in bnx2x_prep_dmae_with_comp()
531 dmae->comp_val = DMAE_COMP_VAL; in bnx2x_prep_dmae_with_comp()
534 /* issue a dmae command over the init-channel and wait for completion */
543 /* Lock the dmae channel. Disable BHs to prevent a dead-lock in bnx2x_issue_dmae_with_comp()
548 spin_lock_bh(&bp->dmae_lock); in bnx2x_issue_dmae_with_comp()
561 (bp->recovery_state != BNX2X_RECOVERY_DONE && in bnx2x_issue_dmae_with_comp()
562 bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) { in bnx2x_issue_dmae_with_comp()
567 cnt--; in bnx2x_issue_dmae_with_comp()
577 spin_unlock_bh(&bp->dmae_lock); in bnx2x_issue_dmae_with_comp()
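
Lines 534-577 outline the issue-and-wait flow of bnx2x_issue_dmae_with_comp(): the DMAE channel is taken with a BH-disabling spinlock, the command is posted, and a bounded countdown (cnt--) polls the write-back completion word, bailing out early if the device is in a recovery state other than DONE/NIC_LOADING. A condensed, hedged version of that control flow follows; the completion constant, poll interval, and post_cmd_stub callback are illustrative stand-ins rather than the driver's real symbols.

#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/types.h>

#define COMP_VAL_STUB	0xD0AE0000	/* illustrative completion marker */
#define POLL_US_STUB	5		/* illustrative poll interval, usec */

static int issue_dmae_and_wait(spinlock_t *dmae_lock, u32 *wb_comp,
			       void (*post_cmd_stub)(void), int max_polls)
{
	int rc = 0;

	spin_lock_bh(dmae_lock);	/* BHs off to avoid deadlock with softirq users */
	*wb_comp = 0;			/* clear the write-back word before posting */
	post_cmd_stub();		/* fire the DMAE command */

	while (READ_ONCE(*wb_comp) != COMP_VAL_STUB) {
		if (!max_polls--) {	/* bounded wait, as in the driver's cnt-- */
			rc = -EBUSY;	/* timed out (or device went into recovery) */
			break;
		}
		udelay(POLL_US_STUB);
	}

	spin_unlock_bh(dmae_lock);
	return rc;
}
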
588 if (!bp->dmae_ready) { in bnx2x_write_dmae()
623 if (!bp->dmae_ready) { in bnx2x_read_dmae()
667 len -= dmae_wr_max; in bnx2x_write_dmae_phys_len()
701 return -EINVAL; in bnx2x_get_assert_list_entry()
762 bp->fw_major, bp->fw_minor, bp->fw_rev); in bnx2x_mc_assert()
779 BNX2X_ERR("NO MCP - can not dump\n"); in bnx2x_fw_dump_lvl()
782 netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n", in bnx2x_fw_dump_lvl()
783 (bp->common.bc_ver & 0xff0000) >> 16, in bnx2x_fw_dump_lvl()
784 (bp->common.bc_ver & 0xff00) >> 8, in bnx2x_fw_dump_lvl()
785 (bp->common.bc_ver & 0xff)); in bnx2x_fw_dump_lvl()
787 if (pci_channel_offline(bp->pdev)) { in bnx2x_fw_dump_lvl()
797 trace_shmem_base = bp->common.shmem_base; in bnx2x_fw_dump_lvl()
810 addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE; in bnx2x_fw_dump_lvl()
822 mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000; in bnx2x_fw_dump_lvl()
880 DP(NETIF_MSG_IFDOWN, in bnx2x_hc_int_disable()
897 DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val); in bnx2x_igu_int_disable()
906 if (bp->common.int_block == INT_BLOCK_HC) in bnx2x_int_disable()
925 bp->stats_state = STATS_STATE_DISABLED; in bnx2x_panic_dump()
926 bp->eth_stats.unrecoverable_error++; in bnx2x_panic_dump()
927 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n"); in bnx2x_panic_dump()
929 BNX2X_ERR("begin crash dump -----------------\n"); in bnx2x_panic_dump()
934 struct host_sp_status_block *def_sb = bp->def_status_blk; in bnx2x_panic_dump()
938 bp->def_idx, bp->def_att_idx, bp->attn_state, in bnx2x_panic_dump()
939 bp->spq_prod_idx, bp->stats_counter); in bnx2x_panic_dump()
941 def_sb->atten_status_block.attn_bits, in bnx2x_panic_dump()
942 def_sb->atten_status_block.attn_bits_ack, in bnx2x_panic_dump()
943 def_sb->atten_status_block.status_block_id, in bnx2x_panic_dump()
944 def_sb->atten_status_block.attn_bits_index); in bnx2x_panic_dump()
948 def_sb->sp_sb.index_values[i], in bnx2x_panic_dump()
949 (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " "); in bnx2x_panic_dump()
970 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_panic_dump()
986 if (!bp->fp) in bnx2x_panic_dump()
989 if (!fp->rx_cons_sb) in bnx2x_panic_dump()
994 i, fp->rx_bd_prod, fp->rx_bd_cons, in bnx2x_panic_dump()
995 fp->rx_comp_prod, in bnx2x_panic_dump()
996 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb)); in bnx2x_panic_dump()
998 fp->rx_sge_prod, fp->last_max_sge, in bnx2x_panic_dump()
999 le16_to_cpu(fp->fp_hc_idx)); in bnx2x_panic_dump()
1004 if (!fp->txdata_ptr[cos]) in bnx2x_panic_dump()
1007 txdata = *fp->txdata_ptr[cos]; in bnx2x_panic_dump()
1030 fp->sb_running_index[j], in bnx2x_panic_dump()
1031 (j == HC_SB_MAX_SM - 1) ? ")" : " "); in bnx2x_panic_dump()
1036 fp->sb_index_values[j], in bnx2x_panic_dump()
1037 (j == loop - 1) ? ")" : " "); in bnx2x_panic_dump()
1054 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) + in bnx2x_panic_dump()
1096 BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod); in bnx2x_panic_dump()
1098 u32 *data = (u32 *)&bp->eq_ring[i].message.data; in bnx2x_panic_dump()
1101 i, bp->eq_ring[i].message.opcode, in bnx2x_panic_dump()
1102 bp->eq_ring[i].message.error); in bnx2x_panic_dump()
1111 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_panic_dump()
1113 if (!bp->fp) in bnx2x_panic_dump()
1116 if (!fp->rx_cons_sb) in bnx2x_panic_dump()
1119 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); in bnx2x_panic_dump()
1120 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503); in bnx2x_panic_dump()
1122 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j]; in bnx2x_panic_dump()
1123 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j]; in bnx2x_panic_dump()
1126 i, j, rx_bd[1], rx_bd[0], sw_bd->data); in bnx2x_panic_dump()
1129 start = RX_SGE(fp->rx_sge_prod); in bnx2x_panic_dump()
1130 end = RX_SGE(fp->last_max_sge); in bnx2x_panic_dump()
1132 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j]; in bnx2x_panic_dump()
1133 struct sw_rx_page *sw_page = &fp->rx_page_ring[j]; in bnx2x_panic_dump()
1136 i, j, rx_sge[1], rx_sge[0], sw_page->page); in bnx2x_panic_dump()
1139 start = RCQ_BD(fp->rx_comp_cons - 10); in bnx2x_panic_dump()
1140 end = RCQ_BD(fp->rx_comp_cons + 503); in bnx2x_panic_dump()
1142 u32 *cqe = (u32 *)&fp->rx_comp_ring[j]; in bnx2x_panic_dump()
1151 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_panic_dump()
1153 if (!bp->fp) in bnx2x_panic_dump()
1157 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; in bnx2x_panic_dump()
1159 if (!fp->txdata_ptr[cos]) in bnx2x_panic_dump()
1162 if (!txdata->tx_cons_sb) in bnx2x_panic_dump()
1165 start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10); in bnx2x_panic_dump()
1166 end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245); in bnx2x_panic_dump()
1169 &txdata->tx_buf_ring[j]; in bnx2x_panic_dump()
1172 i, cos, j, sw_bd->skb, in bnx2x_panic_dump()
1173 sw_bd->first_bd); in bnx2x_panic_dump()
1176 start = TX_BD(txdata->tx_bd_cons - 10); in bnx2x_panic_dump()
1177 end = TX_BD(txdata->tx_bd_cons + 254); in bnx2x_panic_dump()
1179 u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j]; in bnx2x_panic_dump()
1189 int tmp_msg_en = bp->msg_enable; in bnx2x_panic_dump()
1192 bp->msg_enable |= NETIF_MSG_HW; in bnx2x_panic_dump()
1193 BNX2X_ERR("Idle check (1st round) ----------\n"); in bnx2x_panic_dump()
1195 BNX2X_ERR("Idle check (2nd round) ----------\n"); in bnx2x_panic_dump()
1197 bp->msg_enable = tmp_msg_en; in bnx2x_panic_dump()
1201 BNX2X_ERR("end crash dump -----------------\n"); in bnx2x_panic_dump()
1234 crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed); in bnx2x_pbf_pN_buf_flushed()
1235 crd = crd_start = REG_RD(bp, regs->crd); in bnx2x_pbf_pN_buf_flushed()
1236 init_crd = REG_RD(bp, regs->init_crd); in bnx2x_pbf_pN_buf_flushed()
1238 DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd); in bnx2x_pbf_pN_buf_flushed()
1239 DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd); in bnx2x_pbf_pN_buf_flushed()
1240 DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed); in bnx2x_pbf_pN_buf_flushed()
1243 (init_crd - crd_start))) { in bnx2x_pbf_pN_buf_flushed()
1244 if (cur_cnt--) { in bnx2x_pbf_pN_buf_flushed()
1246 crd = REG_RD(bp, regs->crd); in bnx2x_pbf_pN_buf_flushed()
1247 crd_freed = REG_RD(bp, regs->crd_freed); in bnx2x_pbf_pN_buf_flushed()
1249 DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n", in bnx2x_pbf_pN_buf_flushed()
1250 regs->pN); in bnx2x_pbf_pN_buf_flushed()
1251 DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n", in bnx2x_pbf_pN_buf_flushed()
1252 regs->pN, crd); in bnx2x_pbf_pN_buf_flushed()
1253 DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n", in bnx2x_pbf_pN_buf_flushed()
1254 regs->pN, crd_freed); in bnx2x_pbf_pN_buf_flushed()
1258 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n", in bnx2x_pbf_pN_buf_flushed()
1259 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); in bnx2x_pbf_pN_buf_flushed()
1269 occup = to_free = REG_RD(bp, regs->lines_occup); in bnx2x_pbf_pN_cmd_flushed()
1270 freed = freed_start = REG_RD(bp, regs->lines_freed); in bnx2x_pbf_pN_cmd_flushed()
1272 DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); in bnx2x_pbf_pN_cmd_flushed()
1273 DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); in bnx2x_pbf_pN_cmd_flushed()
1276 if (cur_cnt--) { in bnx2x_pbf_pN_cmd_flushed()
1278 occup = REG_RD(bp, regs->lines_occup); in bnx2x_pbf_pN_cmd_flushed()
1279 freed = REG_RD(bp, regs->lines_freed); in bnx2x_pbf_pN_cmd_flushed()
1281 DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n", in bnx2x_pbf_pN_cmd_flushed()
1282 regs->pN); in bnx2x_pbf_pN_cmd_flushed()
1283 DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", in bnx2x_pbf_pN_cmd_flushed()
1284 regs->pN, occup); in bnx2x_pbf_pN_cmd_flushed()
1285 DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", in bnx2x_pbf_pN_cmd_flushed()
1286 regs->pN, freed); in bnx2x_pbf_pN_cmd_flushed()
1290 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n", in bnx2x_pbf_pN_cmd_flushed()
1291 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); in bnx2x_pbf_pN_cmd_flushed()
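
Lines 1234-1291 repeat one waiting idiom for both the PBF buffer and command queues during FLR cleanup: capture the starting occupancy and freed counters, then poll until the freed counter has advanced by the captured occupancy, giving up after poll_count intervals of FLR_WAIT_INTERVAL and logging how long the wait took. A generic sketch of that idiom, with an assumed read_reg_stub accessor, is below.

#include <linux/types.h>
#include <linux/delay.h>

/* Wait until the "freed" counter catches up with the initially occupied
 * amount, or the poll budget runs out. All names are illustrative.
 */
static int wait_counter_flush(u32 (*read_reg_stub)(u32 reg), u32 occup_reg,
			      u32 freed_reg, int poll_count,
			      unsigned int interval_us)
{
	u32 to_free = read_reg_stub(occup_reg);		/* still occupied at start */
	u32 freed_start = read_reg_stub(freed_reg);
	u32 freed = freed_start;
	int cur_cnt = poll_count;

	while ((u32)(freed - freed_start) < to_free) {
		if (cur_cnt-- <= 0)			/* budget exhausted: timed out */
			return -1;
		udelay(interval_us);
		freed = read_reg_stub(freed_reg);
	}
	return poll_count - cur_cnt;			/* number of polls actually used */
}
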
1300 while ((val = REG_RD(bp, reg)) != expected && cur_cnt--) in bnx2x_flr_clnup_reg_poll()
1419 DP(BNX2X_MSG_SP, "sending FW Final cleanup\n"); in bnx2x_send_final_clnup()
1424 DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n", in bnx2x_send_final_clnup()
1447 /* wait for CFC PF usage-counter to zero (includes all the VFs) */ in bnx2x_poll_hw_usage_counters()
1454 /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ in bnx2x_poll_hw_usage_counters()
1461 /* Wait for QM PF usage-counter to zero (until DQ cleanup) */ in bnx2x_poll_hw_usage_counters()
1468 /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ in bnx2x_poll_hw_usage_counters()
1495 DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val); in bnx2x_hw_enable_status()
1498 DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val); in bnx2x_hw_enable_status()
1501 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val); in bnx2x_hw_enable_status()
1504 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val); in bnx2x_hw_enable_status()
1507 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val); in bnx2x_hw_enable_status()
1510 DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val); in bnx2x_hw_enable_status()
1513 DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val); in bnx2x_hw_enable_status()
1516 DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", in bnx2x_hw_enable_status()
1524 DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp)); in bnx2x_pf_flr_clnup()
1526 /* Re-enable PF target read access */ in bnx2x_pf_flr_clnup()
1530 DP(BNX2X_MSG_SP, "Polling usage counters\n"); in bnx2x_pf_flr_clnup()
1532 return -EBUSY; in bnx2x_pf_flr_clnup()
1538 return -EBUSY; in bnx2x_pf_flr_clnup()
1549 if (bnx2x_is_pcie_pending(bp->pdev)) in bnx2x_pf_flr_clnup()
1556 * Master enable - Due to WB DMAE writes performed before this in bnx2x_pf_flr_clnup()
1557 * register is re-initialized as part of the regular function init in bnx2x_pf_flr_clnup()
1569 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false; in bnx2x_hc_int_enable()
1570 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false; in bnx2x_hc_int_enable()
1571 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false; in bnx2x_hc_int_enable()
1592 DP(NETIF_MSG_IFUP, in bnx2x_hc_int_enable()
1604 DP(NETIF_MSG_IFUP, in bnx2x_hc_int_enable()
1606 (msix ? "MSI-X" : (msi ? "MSI" : "INTx"))); in bnx2x_hc_int_enable()
1618 if (bp->port.pmf) in bnx2x_hc_int_enable()
1632 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false; in bnx2x_igu_int_enable()
1633 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false; in bnx2x_igu_int_enable()
1634 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false; in bnx2x_igu_int_enable()
1658 /* Clean previous status - need to configure igu prior to ack*/ in bnx2x_igu_int_enable()
1666 DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n", in bnx2x_igu_int_enable()
1667 val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx"))); in bnx2x_igu_int_enable()
1672 pci_intx(bp->pdev, true); in bnx2x_igu_int_enable()
1679 if (bp->port.pmf) in bnx2x_igu_int_enable()
1691 if (bp->common.int_block == INT_BLOCK_HC) in bnx2x_int_enable()
1699 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; in bnx2x_int_disable_sync()
1708 synchronize_irq(bp->msix_table[0].vector); in bnx2x_int_disable_sync()
1713 synchronize_irq(bp->msix_table[offset++].vector); in bnx2x_int_disable_sync()
1715 synchronize_irq(bp->pdev->irq); in bnx2x_int_disable_sync()
1718 cancel_delayed_work(&bp->sp_task); in bnx2x_int_disable_sync()
1719 cancel_delayed_work(&bp->period_task); in bnx2x_int_disable_sync()
1737 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, in bnx2x_trylock_hw_lock()
1742 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, in bnx2x_trylock_hw_lock()
1752 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); in bnx2x_trylock_hw_lock()
1760 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, in bnx2x_trylock_hw_lock()
1766 * bnx2x_get_leader_lock_resource - get the recovery leader resource id
1782 * bnx2x_trylock_leader_lock - try to acquire a leader lock.
1798 /* Set the interrupt occurred bit for the sp-task to recognize it in bnx2x_schedule_sp_task()
1802 atomic_set(&bp->interrupt_occurred, 1); in bnx2x_schedule_sp_task()
1811 return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); in bnx2x_schedule_sp_task()
1816 struct bnx2x *bp = fp->bp; in bnx2x_sp_event()
1817 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); in bnx2x_sp_event()
1818 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); in bnx2x_sp_event()
1822 DP(BNX2X_MSG_SP, in bnx2x_sp_event()
1824 fp->index, cid, command, bp->state, in bnx2x_sp_event()
1825 rr_cqe->ramrod_cqe.ramrod_type); in bnx2x_sp_event()
1836 DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid); in bnx2x_sp_event()
1841 DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid); in bnx2x_sp_event()
1846 DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid); in bnx2x_sp_event()
1851 DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid); in bnx2x_sp_event()
1856 DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid); in bnx2x_sp_event()
1861 DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid); in bnx2x_sp_event()
1866 DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid); in bnx2x_sp_event()
1872 command, fp->index); in bnx2x_sp_event()
1877 q_obj->complete_cmd(bp, q_obj, drv_cmd)) in bnx2x_sp_event()
1878 /* q_obj->complete_cmd() failure means that this was in bnx2x_sp_event()
1881 * In this case we don't want to increase the bp->spq_left in bnx2x_sp_event()
1892 atomic_inc(&bp->cq_spq_left); in bnx2x_sp_event()
1893 /* push the change in bp->spq_left and towards the memory */ in bnx2x_sp_event()
1896 DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left)); in bnx2x_sp_event()
1899 (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) { in bnx2x_sp_event()
1910 set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state); in bnx2x_sp_event()
1912 clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); in bnx2x_sp_event()
1932 DP(NETIF_MSG_INTR, "not our interrupt!\n"); in bnx2x_interrupt()
1935 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status); in bnx2x_interrupt()
1938 if (unlikely(bp->panic)) in bnx2x_interrupt()
1943 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_interrupt()
1945 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp)); in bnx2x_interrupt()
1949 prefetch(fp->txdata_ptr[cos]->tx_cons_sb); in bnx2x_interrupt()
1950 prefetch(&fp->sb_running_index[SM_RX_ID]); in bnx2x_interrupt()
1951 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi)); in bnx2x_interrupt()
1962 c_ops = rcu_dereference(bp->cnic_ops); in bnx2x_interrupt()
1963 if (c_ops && (bp->cnic_eth_dev.drv_state & in bnx2x_interrupt()
1965 c_ops->cnic_handler(bp->cnic_data, NULL); in bnx2x_interrupt()
1985 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n", in bnx2x_interrupt()
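
Lines 1943-1951 show how the INTx/MSI handler fans work out to the fastpath queues: each queue owns one bit in the interrupt status word, computed as 0x2 << (fp->index + CNIC_SUPPORT(bp)), and a set bit schedules that queue's NAPI context; whatever remains after the CNIC check ends up at the "unknown interrupt" print on line 1985. A minimal sketch of that per-bit dispatch, with hypothetical types, follows.

#include <linux/types.h>

struct fp_stub {
	int index;
	bool napi_scheduled;	/* stands in for napi_schedule_irqoff() */
};

/* Walk the queues, consume the status bit of each one that fired, and
 * return the leftover bits (non-zero means "unknown interrupt").
 */
static u16 dispatch_status_bits(u16 status, struct fp_stub *fps,
				int nr_queues, int cnic_offset)
{
	int i;

	for (i = 0; i < nr_queues; i++) {
		u16 mask = 0x2 << (fps[i].index + cnic_offset);

		if (status & mask) {
			fps[i].napi_scheduled = true;	/* hand RX/TX work to NAPI */
			status &= ~mask;		/* consume this queue's bit */
		}
	}
	return status;
}
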
2009 return -EINVAL; in bnx2x_acquire_hw_lock()
2016 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); in bnx2x_acquire_hw_lock()
2024 return -EEXIST; in bnx2x_acquire_hw_lock()
2038 return -EAGAIN; in bnx2x_acquire_hw_lock()
2057 return -EINVAL; in bnx2x_release_hw_lock()
2064 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); in bnx2x_release_hw_lock()
2072 return -EFAULT; in bnx2x_release_hw_lock()
2092 return -EINVAL; in bnx2x_get_gpio()
2119 return -EINVAL; in bnx2x_set_gpio()
2128 DP(NETIF_MSG_LINK, in bnx2x_set_gpio()
2129 "Set GPIO %d (shift %d) -> output low\n", in bnx2x_set_gpio()
2137 DP(NETIF_MSG_LINK, in bnx2x_set_gpio()
2138 "Set GPIO %d (shift %d) -> output high\n", in bnx2x_set_gpio()
2146 DP(NETIF_MSG_LINK, in bnx2x_set_gpio()
2147 "Set GPIO %d (shift %d) -> input\n", in bnx2x_set_gpio()
2179 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins); in bnx2x_set_mult_gpio()
2185 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins); in bnx2x_set_mult_gpio()
2191 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins); in bnx2x_set_mult_gpio()
2198 rc = -EINVAL; in bnx2x_set_mult_gpio()
2222 return -EINVAL; in bnx2x_set_gpio_int()
2231 DP(NETIF_MSG_LINK, in bnx2x_set_gpio_int()
2232 "Clear GPIO INT %d (shift %d) -> output low\n", in bnx2x_set_gpio_int()
2240 DP(NETIF_MSG_LINK, in bnx2x_set_gpio_int()
2241 "Set GPIO INT %d (shift %d) -> output high\n", in bnx2x_set_gpio_int()
2265 return -EINVAL; in bnx2x_set_spio()
2274 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio); in bnx2x_set_spio()
2281 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio); in bnx2x_set_spio()
2288 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio); in bnx2x_set_spio()
2307 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | in bnx2x_calc_fc_adv()
2309 switch (bp->link_vars.ieee_fc & in bnx2x_calc_fc_adv()
2312 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | in bnx2x_calc_fc_adv()
2317 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; in bnx2x_calc_fc_adv()
2331 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000)) in bnx2x_set_requested_fc()
2332 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX; in bnx2x_set_requested_fc()
2334 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; in bnx2x_set_requested_fc()
2341 if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) { in bnx2x_init_dropless_fc()
2342 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) in bnx2x_init_dropless_fc()
2350 DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n", in bnx2x_init_dropless_fc()
2357 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx]; in bnx2x_initial_phy_init()
2364 struct link_params *lp = &bp->link_params; in bnx2x_initial_phy_init()
2365 lp->loopback_mode = LOOPBACK_XGXS; in bnx2x_initial_phy_init()
2367 if (lp->req_line_speed[cfx_idx] < SPEED_20000) { in bnx2x_initial_phy_init()
2368 if (lp->speed_cap_mask[cfx_idx] & in bnx2x_initial_phy_init()
2370 lp->req_line_speed[cfx_idx] = in bnx2x_initial_phy_init()
2372 else if (lp->speed_cap_mask[cfx_idx] & in bnx2x_initial_phy_init()
2374 lp->req_line_speed[cfx_idx] = in bnx2x_initial_phy_init()
2377 lp->req_line_speed[cfx_idx] = in bnx2x_initial_phy_init()
2383 struct link_params *lp = &bp->link_params; in bnx2x_initial_phy_init()
2384 lp->loopback_mode = LOOPBACK_EXT; in bnx2x_initial_phy_init()
2387 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); in bnx2x_initial_phy_init()
2395 if (bp->link_vars.link_up) { in bnx2x_initial_phy_init()
2399 queue_delayed_work(bnx2x_wq, &bp->period_task, 0); in bnx2x_initial_phy_init()
2400 bp->link_params.req_line_speed[cfx_idx] = req_line_speed; in bnx2x_initial_phy_init()
2403 BNX2X_ERR("Bootcode is missing - can not initialize link\n"); in bnx2x_initial_phy_init()
2404 return -EINVAL; in bnx2x_initial_phy_init()
2411 bnx2x_phy_init(&bp->link_params, &bp->link_vars); in bnx2x_link_set()
2418 BNX2X_ERR("Bootcode is missing - can not set link\n"); in bnx2x_link_set()
2425 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars); in bnx2x__link_reset()
2428 BNX2X_ERR("Bootcode is missing - can not reset link\n"); in bnx2x__link_reset()
2434 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); in bnx2x_force_link_reset()
2444 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars, in bnx2x_link_test()
2448 BNX2X_ERR("Bootcode is missing - can not test link\n"); in bnx2x_link_test()
2458 0 - if all the min_rates are 0.
2469 u32 vn_cfg = bp->mf_config[vn]; in bnx2x_calc_vn_min()
2476 /* If min rate is zero - set it to 1 */ in bnx2x_calc_vn_min()
2482 input->vnic_min_rate[vn] = vn_min_rate; in bnx2x_calc_vn_min()
2485 /* if ETS or all min rates are zeros - disable fairness */ in bnx2x_calc_vn_min()
2487 input->flags.cmng_enables &= in bnx2x_calc_vn_min()
2489 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n"); in bnx2x_calc_vn_min()
2491 input->flags.cmng_enables &= in bnx2x_calc_vn_min()
2493 DP(NETIF_MSG_IFUP, in bnx2x_calc_vn_min()
2496 input->flags.cmng_enables |= in bnx2x_calc_vn_min()
2504 u32 vn_cfg = bp->mf_config[vn]; in bnx2x_calc_vn_max()
2513 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100; in bnx2x_calc_vn_max()
2519 DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate); in bnx2x_calc_vn_max()
2521 input->vnic_max_rate[vn] = vn_max_rate; in bnx2x_calc_vn_max()
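
Line 2513 computes the per-VN bandwidth ceiling by applying maxCfg as a percentage of the current link rate: vn_max_rate = (line_speed * maxCfg) / 100. As a hedged worked example (numbers invented purely for the arithmetic), a link reporting line_speed = 10000 with maxCfg = 25 yields (10000 * 25) / 100 = 2500 in the same units as line_speed. The companion minimum-rate pass above (lines 2469-2496) floors a zero per-VN minimum to 1 and switches fairness off entirely when ETS is active or every minimum is zero.
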
2558 bp->mf_config[vn] = in bnx2x_read_mf_cfg()
2561 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { in bnx2x_read_mf_cfg()
2562 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n"); in bnx2x_read_mf_cfg()
2563 bp->flags |= MF_FUNC_DIS; in bnx2x_read_mf_cfg()
2565 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n"); in bnx2x_read_mf_cfg()
2566 bp->flags &= ~MF_FUNC_DIS; in bnx2x_read_mf_cfg()
2575 input.port_rate = bp->link_vars.line_speed; in bnx2x_cmng_fns_init()
2587 /* calculate and set min-max rate for each vn */ in bnx2x_cmng_fns_init()
2588 if (bp->port.pmf) in bnx2x_cmng_fns_init()
2596 bnx2x_init_cmng(&input, &bp->cmng); in bnx2x_cmng_fns_init()
2601 DP(NETIF_MSG_IFUP, in bnx2x_cmng_fns_init()
2615 __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port); in storm_memset_cmng()
2624 (u32 *)&cmng->vnic.vnic_max_rate[vn]); in storm_memset_cmng()
2630 (u32 *)&cmng->vnic.vnic_min_rate[vn]); in storm_memset_cmng()
2641 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); in bnx2x_set_local_cmng()
2644 DP(NETIF_MSG_IFUP, in bnx2x_set_local_cmng()
2655 bnx2x_link_update(&bp->link_params, &bp->link_vars); in bnx2x_link_attn()
2659 if (bp->link_vars.link_up) { in bnx2x_link_attn()
2661 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) { in bnx2x_link_attn()
2666 memset(&(pstats->mac_stx[0]), 0, in bnx2x_link_attn()
2669 if (bp->state == BNX2X_STATE_OPEN) in bnx2x_link_attn()
2673 if (bp->link_vars.link_up && bp->link_vars.line_speed) in bnx2x_link_attn()
2684 if (bp->state != BNX2X_STATE_OPEN) in bnx2x__link_status_update()
2690 bnx2x_link_status_update(&bp->link_params, &bp->link_vars); in bnx2x__link_status_update()
2691 if (bp->link_vars.link_up) in bnx2x__link_status_update()
2699 bp->port.supported[0] |= (SUPPORTED_10baseT_Half | in bnx2x__link_status_update()
2711 bp->port.advertising[0] = bp->port.supported[0]; in bnx2x__link_status_update()
2713 bp->link_params.bp = bp; in bnx2x__link_status_update()
2714 bp->link_params.port = BP_PORT(bp); in bnx2x__link_status_update()
2715 bp->link_params.req_duplex[0] = DUPLEX_FULL; in bnx2x__link_status_update()
2716 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE; in bnx2x__link_status_update()
2717 bp->link_params.req_line_speed[0] = SPEED_10000; in bnx2x__link_status_update()
2718 bp->link_params.speed_cap_mask[0] = 0x7f0000; in bnx2x__link_status_update()
2719 bp->link_params.switch_cfg = SWITCH_CFG_10G; in bnx2x__link_status_update()
2720 bp->link_vars.mac_type = MAC_TYPE_BMAC; in bnx2x__link_status_update()
2721 bp->link_vars.line_speed = SPEED_10000; in bnx2x__link_status_update()
2722 bp->link_vars.link_status = in bnx2x__link_status_update()
2725 bp->link_vars.link_up = 1; in bnx2x__link_status_update()
2726 bp->link_vars.duplex = DUPLEX_FULL; in bnx2x__link_status_update()
2727 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE; in bnx2x__link_status_update()
2748 func_params.f_obj = &bp->func_obj; in bnx2x_afex_func_update()
2755 f_update_params->vif_id = vifid; in bnx2x_afex_func_update()
2756 f_update_params->afex_default_vlan = vlan_val; in bnx2x_afex_func_update()
2757 f_update_params->allowed_priorities = allowed_prio; in bnx2x_afex_func_update()
2780 func_params.f_obj = &bp->func_obj; in bnx2x_afex_handle_vif_list_cmd()
2784 update_params->afex_vif_list_command = cmd_type; in bnx2x_afex_handle_vif_list_cmd()
2785 update_params->vif_list_index = vif_index; in bnx2x_afex_handle_vif_list_cmd()
2786 update_params->func_bit_map = in bnx2x_afex_handle_vif_list_cmd()
2788 update_params->func_to_clear = 0; in bnx2x_afex_handle_vif_list_cmd()
2818 DP(BNX2X_MSG_MCP, in bnx2x_handle_afex_cmd()
2826 DP(BNX2X_MSG_MCP, in bnx2x_handle_afex_cmd()
2839 DP(BNX2X_MSG_MCP, in bnx2x_handle_afex_cmd()
2856 bp->mf_config[BP_VN(bp)] = mf_config; in bnx2x_handle_afex_cmd()
2857 DP(BNX2X_MSG_MCP, in bnx2x_handle_afex_cmd()
2870 bp->mf_config[BP_VN(bp)] = mf_config; in bnx2x_handle_afex_cmd()
2910 bp->afex_def_vlan_tag = vlan_val; in bnx2x_handle_afex_cmd()
2911 bp->afex_vlan_mode = vlan_mode; in bnx2x_handle_afex_cmd()
2913 /* notify link down because BP->flags is disabled */ in bnx2x_handle_afex_cmd()
2920 bp->afex_def_vlan_tag = -1; in bnx2x_handle_afex_cmd()
2932 func_params.f_obj = &bp->func_obj; in bnx2x_handle_update_svid_cmd()
2943 /* Re-learn the S-tag from shmem */ in bnx2x_handle_update_svid_cmd()
2947 bp->mf_ov = val; in bnx2x_handle_update_svid_cmd()
2953 /* Configure new S-tag in LLH */ in bnx2x_handle_update_svid_cmd()
2955 bp->mf_ov); in bnx2x_handle_update_svid_cmd()
2959 &switch_update_params->changes); in bnx2x_handle_update_svid_cmd()
2960 switch_update_params->vlan = bp->mf_ov; in bnx2x_handle_update_svid_cmd()
2963 BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n", in bnx2x_handle_update_svid_cmd()
2964 bp->mf_ov); in bnx2x_handle_update_svid_cmd()
2967 DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", in bnx2x_handle_update_svid_cmd()
2968 bp->mf_ov); in bnx2x_handle_update_svid_cmd()
2985 bp->port.pmf = 1; in bnx2x_pmf_update()
2986 DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf); in bnx2x_pmf_update()
2990 * bp->port.pmf here and reading it from the bnx2x_periodic_task(). in bnx2x_pmf_update()
2995 queue_delayed_work(bnx2x_wq, &bp->period_task, 0); in bnx2x_pmf_update()
3001 if (bp->common.int_block == INT_BLOCK_HC) { in bnx2x_pmf_update()
3029 mutex_lock(&bp->fw_mb_mutex); in bnx2x_fw_command()
3030 seq = ++bp->fw_seq; in bnx2x_fw_command()
3034 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n", in bnx2x_fw_command()
3046 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n", in bnx2x_fw_command()
3058 mutex_unlock(&bp->fw_mb_mutex); in bnx2x_fw_command()
3080 storm_memset_func_cfg(bp, &tcfg, p->func_id); in bnx2x_func_init()
3084 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id); in bnx2x_func_init()
3085 storm_memset_func_en(bp, p->func_id, 1); in bnx2x_func_init()
3088 if (p->spq_active) { in bnx2x_func_init()
3089 storm_memset_spq_addr(bp, p->spq_map, p->func_id); in bnx2x_func_init()
3091 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod); in bnx2x_func_init()
3096 * bnx2x_get_common_flags - Return common flags
3102 * Return the flags that are common for the Tx-only and not normal connections.
3122 if (bp->flags & TX_SWITCHING) in bnx2x_get_common_flags()
3147 /* For FCoE - force usage of default priority (for afex) */ in bnx2x_get_q_flags()
3151 if (fp->mode != TPA_MODE_DISABLED) { in bnx2x_get_q_flags()
3154 if (fp->mode == TPA_MODE_GRO) in bnx2x_get_q_flags()
3177 gen_init->stat_id = bnx2x_stats_id(fp); in bnx2x_pf_q_prep_general()
3178 gen_init->spcl_id = fp->cl_id; in bnx2x_pf_q_prep_general()
3180 /* Always use mini-jumbo MTU for FCoE L2 ring */ in bnx2x_pf_q_prep_general()
3182 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU; in bnx2x_pf_q_prep_general()
3184 gen_init->mtu = bp->dev->mtu; in bnx2x_pf_q_prep_general()
3186 gen_init->cos = cos; in bnx2x_pf_q_prep_general()
3188 gen_init->fp_hsi = ETH_FP_HSI_VERSION; in bnx2x_pf_q_prep_general()
3199 if (fp->mode != TPA_MODE_DISABLED) { in bnx2x_pf_rx_q_prep()
3200 pause->sge_th_lo = SGE_TH_LO(bp); in bnx2x_pf_rx_q_prep()
3201 pause->sge_th_hi = SGE_TH_HI(bp); in bnx2x_pf_rx_q_prep()
3204 WARN_ON(bp->dropless_fc && in bnx2x_pf_rx_q_prep()
3205 pause->sge_th_hi + FW_PREFETCH_CNT > in bnx2x_pf_rx_q_prep()
3209 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >> in bnx2x_pf_rx_q_prep()
3211 max_sge = ((max_sge + PAGES_PER_SGE - 1) & in bnx2x_pf_rx_q_prep()
3212 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT; in bnx2x_pf_rx_q_prep()
3216 /* pause - not for e1 */ in bnx2x_pf_rx_q_prep()
3218 pause->bd_th_lo = BD_TH_LO(bp); in bnx2x_pf_rx_q_prep()
3219 pause->bd_th_hi = BD_TH_HI(bp); in bnx2x_pf_rx_q_prep()
3221 pause->rcq_th_lo = RCQ_TH_LO(bp); in bnx2x_pf_rx_q_prep()
3222 pause->rcq_th_hi = RCQ_TH_HI(bp); in bnx2x_pf_rx_q_prep()
3227 WARN_ON(bp->dropless_fc && in bnx2x_pf_rx_q_prep()
3228 pause->bd_th_hi + FW_PREFETCH_CNT > in bnx2x_pf_rx_q_prep()
3229 bp->rx_ring_size); in bnx2x_pf_rx_q_prep()
3230 WARN_ON(bp->dropless_fc && in bnx2x_pf_rx_q_prep()
3231 pause->rcq_th_hi + FW_PREFETCH_CNT > in bnx2x_pf_rx_q_prep()
3234 pause->pri_map = 1; in bnx2x_pf_rx_q_prep()
3238 rxq_init->dscr_map = fp->rx_desc_mapping; in bnx2x_pf_rx_q_prep()
3239 rxq_init->sge_map = fp->rx_sge_mapping; in bnx2x_pf_rx_q_prep()
3240 rxq_init->rcq_map = fp->rx_comp_mapping; in bnx2x_pf_rx_q_prep()
3241 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE; in bnx2x_pf_rx_q_prep()
3246 rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START - in bnx2x_pf_rx_q_prep()
3247 BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING; in bnx2x_pf_rx_q_prep()
3249 rxq_init->cl_qzone_id = fp->cl_qzone_id; in bnx2x_pf_rx_q_prep()
3250 rxq_init->tpa_agg_sz = tpa_agg_size; in bnx2x_pf_rx_q_prep()
3251 rxq_init->sge_buf_sz = sge_sz; in bnx2x_pf_rx_q_prep()
3252 rxq_init->max_sges_pkt = max_sge; in bnx2x_pf_rx_q_prep()
3253 rxq_init->rss_engine_id = BP_FUNC(bp); in bnx2x_pf_rx_q_prep()
3254 rxq_init->mcast_engine_id = BP_FUNC(bp); in bnx2x_pf_rx_q_prep()
3261 rxq_init->max_tpa_queues = MAX_AGG_QS(bp); in bnx2x_pf_rx_q_prep()
3263 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; in bnx2x_pf_rx_q_prep()
3264 rxq_init->fw_sb_id = fp->fw_sb_id; in bnx2x_pf_rx_q_prep()
3267 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS; in bnx2x_pf_rx_q_prep()
3269 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; in bnx2x_pf_rx_q_prep()
3274 rxq_init->silent_removal_value = bp->afex_def_vlan_tag; in bnx2x_pf_rx_q_prep()
3275 rxq_init->silent_removal_mask = VLAN_VID_MASK; in bnx2x_pf_rx_q_prep()
3283 txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping; in bnx2x_pf_tx_q_prep()
3284 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; in bnx2x_pf_tx_q_prep()
3285 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; in bnx2x_pf_tx_q_prep()
3286 txq_init->fw_sb_id = fp->fw_sb_id; in bnx2x_pf_tx_q_prep()
3292 txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id); in bnx2x_pf_tx_q_prep()
3295 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS; in bnx2x_pf_tx_q_prep()
3296 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE; in bnx2x_pf_tx_q_prep()
3323 func_init.spq_map = bp->spq_mapping; in bnx2x_pf_init()
3324 func_init.spq_prod = bp->spq_prod_idx; in bnx2x_pf_init()
3328 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port)); in bnx2x_pf_init()
3334 * re-calculated according to the actual link rate. in bnx2x_pf_init()
3336 bp->link_vars.line_speed = SPEED_10000; in bnx2x_pf_init()
3340 if (bp->port.pmf) in bnx2x_pf_init()
3341 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); in bnx2x_pf_init()
3343 /* init Event Queue - PCI bus guarantees correct endianness */ in bnx2x_pf_init()
3344 eq_data.base_addr.hi = U64_HI(bp->eq_mapping); in bnx2x_pf_init()
3345 eq_data.base_addr.lo = U64_LO(bp->eq_mapping); in bnx2x_pf_init()
3346 eq_data.producer = bp->eq_prod; in bnx2x_pf_init()
3368 /* Tx queue should be only re-enabled */ in bnx2x_e1h_enable()
3369 netif_tx_wake_all_queues(bp->dev); in bnx2x_e1h_enable()
3382 &bp->slowpath->drv_info_to_mcp.ether_stat; in bnx2x_drv_info_ether_stat()
3384 &bp->sp_objs->mac_obj; in bnx2x_drv_info_ether_stat()
3387 strscpy(ether_stat->version, DRV_MODULE_VERSION, in bnx2x_drv_info_ether_stat()
3399 memset(ether_stat->mac_local + i, 0, in bnx2x_drv_info_ether_stat()
3400 sizeof(ether_stat->mac_local[0])); in bnx2x_drv_info_ether_stat()
3401 mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj, in bnx2x_drv_info_ether_stat()
3403 ether_stat->mac_local + MAC_PAD, MAC_PAD, in bnx2x_drv_info_ether_stat()
3405 ether_stat->mtu_size = bp->dev->mtu; in bnx2x_drv_info_ether_stat()
3406 if (bp->dev->features & NETIF_F_RXCSUM) in bnx2x_drv_info_ether_stat()
3407 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; in bnx2x_drv_info_ether_stat()
3408 if (bp->dev->features & NETIF_F_TSO) in bnx2x_drv_info_ether_stat()
3409 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK; in bnx2x_drv_info_ether_stat()
3410 ether_stat->feature_flags |= bp->common.boot_mode; in bnx2x_drv_info_ether_stat()
3412 ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0; in bnx2x_drv_info_ether_stat()
3414 ether_stat->txq_size = bp->tx_ring_size; in bnx2x_drv_info_ether_stat()
3415 ether_stat->rxq_size = bp->rx_ring_size; in bnx2x_drv_info_ether_stat()
3418 ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0; in bnx2x_drv_info_ether_stat()
3424 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; in bnx2x_drv_info_fcoe_stat()
3426 &bp->slowpath->drv_info_to_mcp.fcoe_stat; in bnx2x_drv_info_fcoe_stat()
3431 memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN); in bnx2x_drv_info_fcoe_stat()
3433 fcoe_stat->qos_priority = in bnx2x_drv_info_fcoe_stat()
3434 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE]; in bnx2x_drv_info_fcoe_stat()
3439 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. in bnx2x_drv_info_fcoe_stat()
3443 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. in bnx2x_drv_info_fcoe_stat()
3447 &bp->fw_stats_data->fcoe; in bnx2x_drv_info_fcoe_stat()
3449 ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3450 fcoe_stat->rx_bytes_lo, in bnx2x_drv_info_fcoe_stat()
3451 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt); in bnx2x_drv_info_fcoe_stat()
3453 ADD_64_LE(fcoe_stat->rx_bytes_hi, in bnx2x_drv_info_fcoe_stat()
3454 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi, in bnx2x_drv_info_fcoe_stat()
3455 fcoe_stat->rx_bytes_lo, in bnx2x_drv_info_fcoe_stat()
3456 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo); in bnx2x_drv_info_fcoe_stat()
3458 ADD_64_LE(fcoe_stat->rx_bytes_hi, in bnx2x_drv_info_fcoe_stat()
3459 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi, in bnx2x_drv_info_fcoe_stat()
3460 fcoe_stat->rx_bytes_lo, in bnx2x_drv_info_fcoe_stat()
3461 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo); in bnx2x_drv_info_fcoe_stat()
3463 ADD_64_LE(fcoe_stat->rx_bytes_hi, in bnx2x_drv_info_fcoe_stat()
3464 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi, in bnx2x_drv_info_fcoe_stat()
3465 fcoe_stat->rx_bytes_lo, in bnx2x_drv_info_fcoe_stat()
3466 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo); in bnx2x_drv_info_fcoe_stat()
3468 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3469 fcoe_stat->rx_frames_lo, in bnx2x_drv_info_fcoe_stat()
3470 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt); in bnx2x_drv_info_fcoe_stat()
3472 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3473 fcoe_stat->rx_frames_lo, in bnx2x_drv_info_fcoe_stat()
3474 fcoe_q_tstorm_stats->rcv_ucast_pkts); in bnx2x_drv_info_fcoe_stat()
3476 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3477 fcoe_stat->rx_frames_lo, in bnx2x_drv_info_fcoe_stat()
3478 fcoe_q_tstorm_stats->rcv_bcast_pkts); in bnx2x_drv_info_fcoe_stat()
3480 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3481 fcoe_stat->rx_frames_lo, in bnx2x_drv_info_fcoe_stat()
3482 fcoe_q_tstorm_stats->rcv_mcast_pkts); in bnx2x_drv_info_fcoe_stat()
3484 ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3485 fcoe_stat->tx_bytes_lo, in bnx2x_drv_info_fcoe_stat()
3486 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt); in bnx2x_drv_info_fcoe_stat()
3488 ADD_64_LE(fcoe_stat->tx_bytes_hi, in bnx2x_drv_info_fcoe_stat()
3489 fcoe_q_xstorm_stats->ucast_bytes_sent.hi, in bnx2x_drv_info_fcoe_stat()
3490 fcoe_stat->tx_bytes_lo, in bnx2x_drv_info_fcoe_stat()
3491 fcoe_q_xstorm_stats->ucast_bytes_sent.lo); in bnx2x_drv_info_fcoe_stat()
3493 ADD_64_LE(fcoe_stat->tx_bytes_hi, in bnx2x_drv_info_fcoe_stat()
3494 fcoe_q_xstorm_stats->bcast_bytes_sent.hi, in bnx2x_drv_info_fcoe_stat()
3495 fcoe_stat->tx_bytes_lo, in bnx2x_drv_info_fcoe_stat()
3496 fcoe_q_xstorm_stats->bcast_bytes_sent.lo); in bnx2x_drv_info_fcoe_stat()
3498 ADD_64_LE(fcoe_stat->tx_bytes_hi, in bnx2x_drv_info_fcoe_stat()
3499 fcoe_q_xstorm_stats->mcast_bytes_sent.hi, in bnx2x_drv_info_fcoe_stat()
3500 fcoe_stat->tx_bytes_lo, in bnx2x_drv_info_fcoe_stat()
3501 fcoe_q_xstorm_stats->mcast_bytes_sent.lo); in bnx2x_drv_info_fcoe_stat()
3503 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3504 fcoe_stat->tx_frames_lo, in bnx2x_drv_info_fcoe_stat()
3505 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt); in bnx2x_drv_info_fcoe_stat()
3507 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3508 fcoe_stat->tx_frames_lo, in bnx2x_drv_info_fcoe_stat()
3509 fcoe_q_xstorm_stats->ucast_pkts_sent); in bnx2x_drv_info_fcoe_stat()
3511 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3512 fcoe_stat->tx_frames_lo, in bnx2x_drv_info_fcoe_stat()
3513 fcoe_q_xstorm_stats->bcast_pkts_sent); in bnx2x_drv_info_fcoe_stat()
3515 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3516 fcoe_stat->tx_frames_lo, in bnx2x_drv_info_fcoe_stat()
3517 fcoe_q_xstorm_stats->mcast_pkts_sent); in bnx2x_drv_info_fcoe_stat()
3526 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; in bnx2x_drv_info_iscsi_stat()
3528 &bp->slowpath->drv_info_to_mcp.iscsi_stat; in bnx2x_drv_info_iscsi_stat()
3533 memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac, in bnx2x_drv_info_iscsi_stat()
3536 iscsi_stat->qos_priority = in bnx2x_drv_info_iscsi_stat()
3537 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI]; in bnx2x_drv_info_iscsi_stat()
3555 DP(BNX2X_MSG_MCP, in bnx2x_config_mf_bw()
3560 if (bp->link_vars.link_up) { in bnx2x_config_mf_bw()
3564 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); in bnx2x_config_mf_bw()
3575 DP(BNX2X_MSG_MCP, "EEE - LLDP event\n"); in bnx2x_handle_eee_event()
3589 /* if drv_info version supported by MFW doesn't match - send NACK */ in bnx2x_handle_drv_info_req()
3599 mutex_lock(&bp->drv_info_mutex); in bnx2x_handle_drv_info_req()
3601 memset(&bp->slowpath->drv_info_to_mcp, 0, in bnx2x_handle_drv_info_req()
3615 /* if op code isn't supported - send NACK */ in bnx2x_handle_drv_info_req()
3635 DP(BNX2X_MSG_MCP, "Management does not support indication\n"); in bnx2x_handle_drv_info_req()
3636 } else if (!bp->drv_info_mng_owner) { in bnx2x_handle_drv_info_req()
3654 DP(BNX2X_MSG_MCP, "Management did not release indication\n"); in bnx2x_handle_drv_info_req()
3655 bp->drv_info_mng_owner = true; in bnx2x_handle_drv_info_req()
3659 mutex_unlock(&bp->drv_info_mutex); in bnx2x_handle_drv_info_req()
3671 vals[0] -= '0'; in bnx2x_update_mng_version_utility()
3694 mutex_lock(&bp->drv_info_mutex); in bnx2x_update_mng_version()
3696 if (bp->drv_info_mng_owner) in bnx2x_update_mng_version()
3699 if (bp->state != BNX2X_STATE_OPEN) in bnx2x_update_mng_version()
3708 memset(&bp->slowpath->drv_info_to_mcp, 0, in bnx2x_update_mng_version()
3711 version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version; in bnx2x_update_mng_version()
3714 memset(&bp->slowpath->drv_info_to_mcp, 0, in bnx2x_update_mng_version()
3717 version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version; in bnx2x_update_mng_version()
3725 mutex_unlock(&bp->drv_info_mutex); in bnx2x_update_mng_version()
3727 DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n", in bnx2x_update_mng_version()
3747 /* Check & notify On-Chip dump. */ in bnx2x_update_mfw_dump()
3751 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n"); in bnx2x_update_mfw_dump()
3754 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n"); in bnx2x_update_mfw_dump()
3776 DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event); in bnx2x_oem_event()
3781 * where the bp->flags can change so it is done without any in bnx2x_oem_event()
3784 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { in bnx2x_oem_event()
3785 DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n"); in bnx2x_oem_event()
3786 bp->flags |= MF_FUNC_DIS; in bnx2x_oem_event()
3790 DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n"); in bnx2x_oem_event()
3791 bp->flags &= ~MF_FUNC_DIS; in bnx2x_oem_event()
3816 struct eth_spe *next_spe = bp->spq_prod_bd; in bnx2x_sp_get_next()
3818 if (bp->spq_prod_bd == bp->spq_last_bd) { in bnx2x_sp_get_next()
3819 bp->spq_prod_bd = bp->spq; in bnx2x_sp_get_next()
3820 bp->spq_prod_idx = 0; in bnx2x_sp_get_next()
3821 DP(BNX2X_MSG_SP, "end of spq\n"); in bnx2x_sp_get_next()
3823 bp->spq_prod_bd++; in bnx2x_sp_get_next()
3824 bp->spq_prod_idx++; in bnx2x_sp_get_next()
3842 bp->spq_prod_idx); in bnx2x_sp_prod_update()
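
Lines 3816-3824 are the slow-path queue's producer advance in bnx2x_sp_get_next(): hand back the current element, then either wrap the producer pointer to the ring base (resetting the index) or step both the pointer and the index forward; line 3842 then pushes the new producer index out. A generic ring-producer sketch of the same wrap logic, built on a hypothetical ring_stub type, is below.

#include <linux/types.h>

struct ring_stub {
	void	*base;		/* first element of the ring  */
	void	*last;		/* last element of the ring   */
	void	*prod_bd;	/* current producer slot      */
	u16	prod_idx;	/* current producer index     */
	size_t	elem_size;	/* size of one ring element   */
};

/* Return the current producer slot and advance, wrapping at the end. */
static void *ring_get_next(struct ring_stub *r)
{
	void *cur = r->prod_bd;

	if (r->prod_bd == r->last) {		/* end of ring: wrap around */
		r->prod_bd = r->base;
		r->prod_idx = 0;
	} else {				/* common case: step forward */
		r->prod_bd = (char *)r->prod_bd + r->elem_size;
		r->prod_idx++;
	}
	return cur;
}
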
3846 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
3866 * bnx2x_sp_post - place a single command on an SP ring
3887 if (unlikely(bp->panic)) { in bnx2x_sp_post()
3889 return -EIO; in bnx2x_sp_post()
3893 spin_lock_bh(&bp->spq_lock); in bnx2x_sp_post()
3896 if (!atomic_read(&bp->eq_spq_left)) { in bnx2x_sp_post()
3898 spin_unlock_bh(&bp->spq_lock); in bnx2x_sp_post()
3900 return -EBUSY; in bnx2x_sp_post()
3902 } else if (!atomic_read(&bp->cq_spq_left)) { in bnx2x_sp_post()
3904 spin_unlock_bh(&bp->spq_lock); in bnx2x_sp_post()
3906 return -EBUSY; in bnx2x_sp_post()
3912 spe->hdr.conn_and_cmd_data = in bnx2x_sp_post()
3916 /* In some cases, type may already contain the func-id in bnx2x_sp_post()
3929 spe->hdr.type = cpu_to_le16(type); in bnx2x_sp_post()
3931 spe->data.update_data_addr.hi = cpu_to_le32(data_hi); in bnx2x_sp_post()
3932 spe->data.update_data_addr.lo = cpu_to_le32(data_lo); in bnx2x_sp_post()
3940 atomic_dec(&bp->eq_spq_left); in bnx2x_sp_post()
3942 atomic_dec(&bp->cq_spq_left); in bnx2x_sp_post()
3944 DP(BNX2X_MSG_SP, in bnx2x_sp_post()
3946 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), in bnx2x_sp_post()
3947 (u32)(U64_LO(bp->spq_mapping) + in bnx2x_sp_post()
3948 (void *)bp->spq_prod_bd - (void *)bp->spq), command, common, in bnx2x_sp_post()
3950 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left)); in bnx2x_sp_post()
3953 spin_unlock_bh(&bp->spq_lock); in bnx2x_sp_post()
3974 rc = -EBUSY; in bnx2x_acquire_alr()
3991 struct host_sp_status_block *def_sb = bp->def_status_blk; in bnx2x_update_dsb_idx()
3995 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) { in bnx2x_update_dsb_idx()
3996 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index; in bnx2x_update_dsb_idx()
4000 if (bp->def_idx != def_sb->sp_sb.running_index) { in bnx2x_update_dsb_idx()
4001 bp->def_idx = def_sb->sp_sb.running_index; in bnx2x_update_dsb_idx()
4025 if (bp->attn_state & asserted) in bnx2x_attn_int_asserted()
4031 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n", in bnx2x_attn_int_asserted()
4034 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); in bnx2x_attn_int_asserted()
4039 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); in bnx2x_attn_int_asserted()
4040 bp->attn_state |= asserted; in bnx2x_attn_int_asserted()
4041 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); in bnx2x_attn_int_asserted()
4063 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n"); in bnx2x_attn_int_asserted()
4066 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n"); in bnx2x_attn_int_asserted()
4069 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n"); in bnx2x_attn_int_asserted()
4072 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n"); in bnx2x_attn_int_asserted()
4076 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n"); in bnx2x_attn_int_asserted()
4080 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n"); in bnx2x_attn_int_asserted()
4084 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n"); in bnx2x_attn_int_asserted()
4089 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n"); in bnx2x_attn_int_asserted()
4093 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n"); in bnx2x_attn_int_asserted()
4097 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n"); in bnx2x_attn_int_asserted()
4104 if (bp->common.int_block == INT_BLOCK_HC) in bnx2x_attn_int_asserted()
4110 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted, in bnx2x_attn_int_asserted()
4111 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); in bnx2x_attn_int_asserted()
4117 * NIG mask. This loop should exit after 2-3 iterations max. in bnx2x_attn_int_asserted()
4119 if (bp->common.int_block != INT_BLOCK_HC) { in bnx2x_attn_int_asserted()
4127 DP(NETIF_MSG_HW, in bnx2x_attn_int_asserted()
4151 …netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card … in bnx2x_fan_failure()
4179 bnx2x_hw_reset_phy(&bp->link_params); in bnx2x_attn_int_deasserted0()
4183 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) { in bnx2x_attn_int_deasserted0()
4185 bnx2x_handle_module_detect_int(&bp->link_params); in bnx2x_attn_int_deasserted0()
4247 BNX2X_ERR("PXP hw attention-0 0x%x\n", val); in bnx2x_attn_int_deasserted2()
4254 BNX2X_ERR("PXP hw attention-1 0x%x\n", val); in bnx2x_attn_int_deasserted2()
4287 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp, in bnx2x_attn_int_deasserted3()
4308 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) in bnx2x_attn_int_deasserted3()
4311 if (bp->port.pmf && in bnx2x_attn_int_deasserted3()
4313 bp->dcbx_enabled > 0) in bnx2x_attn_int_deasserted3()
4327 if (bp->link_vars.periodic_flags & in bnx2x_attn_int_deasserted3()
4331 bp->link_vars.periodic_flags &= in bnx2x_attn_int_deasserted3()
4367 BNX2X_ERR("GRC time-out 0x%08x\n", val); in bnx2x_attn_int_deasserted3()
4380 * 0-7 - Engine0 load counter.
4381 * 8-15 - Engine1 load counter.
4382 * 16 - Engine0 RESET_IN_PROGRESS bit.
4383 * 17 - Engine1 RESET_IN_PROGRESS bit.
4384 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active function
4386 * 19 - Engine1 ONE_IS_LOADED.
4387 * 20 - Chip reset flow bit. When set, the non-leader must wait for both engines
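
The comment block at lines 4380-4387 spells out the layout of the shared register that drives the recovery flow: per-engine load counters in bits 0-7 and 8-15, RESET_IN_PROGRESS bits 16/17, ONE_IS_LOADED bits 18/19, and the chip-reset-flow bit 20. The decoding sketch below is derived only from that stated layout; the mask and helper names are made up for illustration.

#include <linux/types.h>

#define ENGINE0_LOAD_CNT_MASK	0x000000ff	/* bits 0-7   */
#define ENGINE1_LOAD_CNT_MASK	0x0000ff00	/* bits 8-15  */
#define ENGINE1_LOAD_CNT_SHIFT	8
#define ENGINE0_RST_IN_PROG	(1u << 16)
#define ENGINE1_RST_IN_PROG	(1u << 17)
#define ENGINE0_ONE_IS_LOADED	(1u << 18)
#define ENGINE1_ONE_IS_LOADED	(1u << 19)
#define CHIP_RESET_FLOW_BIT	(1u << 20)

static inline u8 engine_load_count(u32 reg, int engine)
{
	return engine ? (reg & ENGINE1_LOAD_CNT_MASK) >> ENGINE1_LOAD_CNT_SHIFT
		      : reg & ENGINE0_LOAD_CNT_MASK;
}

static inline bool engine_reset_in_progress(u32 reg, int engine)
{
	return reg & (engine ? ENGINE1_RST_IN_PROG : ENGINE0_RST_IN_PROG);
}

static inline bool engine_one_is_loaded(u32 reg, int engine)
{
	return reg & (engine ? ENGINE1_ONE_IS_LOADED : ENGINE0_ONE_IS_LOADED);
}
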
4439 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val); in bnx2x_reset_is_global()
4512 DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val); in bnx2x_set_pf_load()
4518 val1 |= (1 << bp->pf_num); in bnx2x_set_pf_load()
4531 * bnx2x_clear_pf_load - clear pf load mark
4549 DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val); in bnx2x_clear_pf_load()
4555 val1 &= ~(1 << bp->pf_num); in bnx2x_clear_pf_load()
4581 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val); in bnx2x_get_load_status()
4585 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n", in bnx2x_get_load_status()
4980 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n" in bnx2x_parity_attn()
4993 netdev_err(bp->dev, in bnx2x_parity_attn()
5018 * bnx2x_chk_parity_attn - checks for parity attentions.
5127 bp->recovery_state = BNX2X_RECOVERY_INIT; in bnx2x_attn_int_deasserted()
5128 schedule_delayed_work(&bp->sp_rtnl_task, 0); in bnx2x_attn_int_deasserted()
5151 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n", in bnx2x_attn_int_deasserted()
5156 group_mask = &bp->attn_group[index]; in bnx2x_attn_int_deasserted()
5158 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n", in bnx2x_attn_int_deasserted()
5160 group_mask->sig[0], group_mask->sig[1], in bnx2x_attn_int_deasserted()
5161 group_mask->sig[2], group_mask->sig[3], in bnx2x_attn_int_deasserted()
5162 group_mask->sig[4]); in bnx2x_attn_int_deasserted()
5165 attn.sig[4] & group_mask->sig[4]); in bnx2x_attn_int_deasserted()
5167 attn.sig[3] & group_mask->sig[3]); in bnx2x_attn_int_deasserted()
5169 attn.sig[1] & group_mask->sig[1]); in bnx2x_attn_int_deasserted()
5171 attn.sig[2] & group_mask->sig[2]); in bnx2x_attn_int_deasserted()
5173 attn.sig[0] & group_mask->sig[0]); in bnx2x_attn_int_deasserted()
5179 if (bp->common.int_block == INT_BLOCK_HC) in bnx2x_attn_int_deasserted()
5186 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val, in bnx2x_attn_int_deasserted()
5187 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); in bnx2x_attn_int_deasserted()
5190 if (~bp->attn_state & deasserted) in bnx2x_attn_int_deasserted()
5199 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n", in bnx2x_attn_int_deasserted()
5202 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); in bnx2x_attn_int_deasserted()
5207 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); in bnx2x_attn_int_deasserted()
5208 bp->attn_state &= ~deasserted; in bnx2x_attn_int_deasserted()
5209 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); in bnx2x_attn_int_deasserted()
5215 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block. in bnx2x_attn_int()
5217 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block. in bnx2x_attn_int()
5219 u32 attn_state = bp->attn_state; in bnx2x_attn_int()
5225 DP(NETIF_MSG_HW, in bnx2x_attn_int()
5243 u32 igu_addr = bp->igu_base_addr; in bnx2x_igu_ack_sb()
5258 u8 err = elem->message.error; in bnx2x_cnic_handle_cfc_del()
5260 if (!bp->cnic_eth_dev.starting_cid || in bnx2x_cnic_handle_cfc_del()
5261 (cid < bp->cnic_eth_dev.starting_cid && in bnx2x_cnic_handle_cfc_del()
5262 cid != bp->cnic_eth_dev.iscsi_l2_cid)) in bnx2x_cnic_handle_cfc_del()
5265 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid); in bnx2x_cnic_handle_cfc_del()
5284 rparam.mcast_obj = &bp->mcast_obj; in bnx2x_handle_mcast_eqe()
5286 netif_addr_lock_bh(bp->dev); in bnx2x_handle_mcast_eqe()
5289 bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw); in bnx2x_handle_mcast_eqe()
5291 /* If there are pending mcast commands - send them */ in bnx2x_handle_mcast_eqe()
5292 if (bp->mcast_obj.check_pending(&bp->mcast_obj)) { in bnx2x_handle_mcast_eqe()
5299 netif_addr_unlock_bh(bp->dev); in bnx2x_handle_mcast_eqe()
5307 u32 echo = le32_to_cpu(elem->message.data.eth_event.echo); in bnx2x_handle_classification_eqe()
5316 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n"); in bnx2x_handle_classification_eqe()
5318 vlan_mac_obj = &bp->iscsi_l2_mac_obj; in bnx2x_handle_classification_eqe()
5320 vlan_mac_obj = &bp->sp_objs[cid].mac_obj; in bnx2x_handle_classification_eqe()
5324 DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n"); in bnx2x_handle_classification_eqe()
5325 vlan_mac_obj = &bp->sp_objs[cid].vlan_obj; in bnx2x_handle_classification_eqe()
5328 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n"); in bnx2x_handle_classification_eqe()
5339 rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags); in bnx2x_handle_classification_eqe()
5344 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n"); in bnx2x_handle_classification_eqe()
5351 netif_addr_lock_bh(bp->dev); in bnx2x_handle_rx_mode_eqe()
5353 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); in bnx2x_handle_rx_mode_eqe()
5356 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state)) in bnx2x_handle_rx_mode_eqe()
5359 &bp->sp_state)) in bnx2x_handle_rx_mode_eqe()
5362 &bp->sp_state)) in bnx2x_handle_rx_mode_eqe()
5365 netif_addr_unlock_bh(bp->dev); in bnx2x_handle_rx_mode_eqe()
5371 if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) { in bnx2x_after_afex_vif_lists()
5372 DP(BNX2X_MSG_SP, in bnx2x_after_afex_vif_lists()
5374 elem->message.data.vif_list_event.func_bit_map); in bnx2x_after_afex_vif_lists()
5376 elem->message.data.vif_list_event.func_bit_map); in bnx2x_after_afex_vif_lists()
5377 } else if (elem->message.data.vif_list_event.echo == in bnx2x_after_afex_vif_lists()
5379 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n"); in bnx2x_after_afex_vif_lists()
5398 &q_update_params->update_flags); in bnx2x_after_function_update()
5400 &q_update_params->update_flags); in bnx2x_after_function_update()
5404 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) { in bnx2x_after_function_update()
5405 q_update_params->silent_removal_value = 0; in bnx2x_after_function_update()
5406 q_update_params->silent_removal_mask = 0; in bnx2x_after_function_update()
5408 q_update_params->silent_removal_value = in bnx2x_after_function_update()
5409 (bp->afex_def_vlan_tag & VLAN_VID_MASK); in bnx2x_after_function_update()
5410 q_update_params->silent_removal_mask = VLAN_VID_MASK; in bnx2x_after_function_update()
5415 fp = &bp->fp[q]; in bnx2x_after_function_update()
5426 fp = &bp->fp[FCOE_IDX(bp)]; in bnx2x_after_function_update()
5434 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); in bnx2x_after_function_update()
5443 /* If no FCoE ring - ACK MCP now */ in bnx2x_after_function_update()
5452 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid); in bnx2x_cid_to_q_obj()
5457 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj; in bnx2x_cid_to_q_obj()
5469 struct bnx2x_func_sp_obj *f_obj = &bp->func_obj; in bnx2x_eq_int()
5470 struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw; in bnx2x_eq_int()
5472 hw_cons = le16_to_cpu(*bp->eq_cons_sb); in bnx2x_eq_int()
5474 /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256. in bnx2x_eq_int()
5475 * when we get the next-page we need to adjust so the loop in bnx2x_eq_int()
5486 sw_cons = bp->eq_cons; in bnx2x_eq_int()
5487 sw_prod = bp->eq_prod; in bnx2x_eq_int()
5489 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n", in bnx2x_eq_int()
5490 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left)); in bnx2x_eq_int()
5495 elem = &bp->eq_ring[EQ_DESC(sw_cons)]; in bnx2x_eq_int()
5499 DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n", in bnx2x_eq_int()
5504 opcode = elem->message.opcode; in bnx2x_eq_int()
5510 &elem->message.data.vf_pf_event); in bnx2x_eq_int()
5516 bp->stats_comp++); in bnx2x_eq_int()
5528 cid = SW_CID(elem->message.data.cfc_del_event.cid); in bnx2x_eq_int()
5530 DP(BNX2X_MSG_SP, in bnx2x_eq_int()
5539 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL)) in bnx2x_eq_int()
5545 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n"); in bnx2x_eq_int()
5547 if (f_obj->complete_cmd(bp, f_obj, in bnx2x_eq_int()
5553 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n"); in bnx2x_eq_int()
5555 if (f_obj->complete_cmd(bp, f_obj, in bnx2x_eq_int()
5561 echo = elem->message.data.function_update_event.echo; in bnx2x_eq_int()
5563 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, in bnx2x_eq_int()
5565 if (f_obj->complete_cmd( in bnx2x_eq_int()
5572 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP, in bnx2x_eq_int()
5574 f_obj->complete_cmd(bp, f_obj, in bnx2x_eq_int()
5587 f_obj->complete_cmd(bp, f_obj, in bnx2x_eq_int()
5592 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, in bnx2x_eq_int()
5594 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START)) in bnx2x_eq_int()
5600 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, in bnx2x_eq_int()
5602 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP)) in bnx2x_eq_int()
5608 DP(BNX2X_MSG_SP | BNX2X_MSG_PTP, in bnx2x_eq_int()
5610 if (f_obj->complete_cmd(bp, f_obj, in bnx2x_eq_int()
5616 switch (opcode | bp->state) { in bnx2x_eq_int()
5623 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n", in bnx2x_eq_int()
5624 SW_CID(elem->message.data.eth_event.echo)); in bnx2x_eq_int()
5625 rss_raw->clear_pending(rss_raw); in bnx2x_eq_int()
5638 DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n"); in bnx2x_eq_int()
5648 DP(BNX2X_MSG_SP, "got mcast ramrod\n"); in bnx2x_eq_int()
5658 DP(BNX2X_MSG_SP, "got rx_mode ramrod\n"); in bnx2x_eq_int()
5663 BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n", in bnx2x_eq_int()
5664 elem->message.opcode, bp->state); in bnx2x_eq_int()
5671 atomic_add(spqe_cnt, &bp->eq_spq_left); in bnx2x_eq_int()
5673 bp->eq_cons = sw_cons; in bnx2x_eq_int()
5674 bp->eq_prod = sw_prod; in bnx2x_eq_int()
5679 bnx2x_update_eq_prod(bp, bp->eq_prod); in bnx2x_eq_int()
5686 DP(BNX2X_MSG_SP, "sp task invoked\n"); in bnx2x_sp_task()
5690 if (atomic_read(&bp->interrupt_occurred)) { in bnx2x_sp_task()
5695 DP(BNX2X_MSG_SP, "status %x\n", status); in bnx2x_sp_task()
5696 DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n"); in bnx2x_sp_task()
5697 atomic_set(&bp->interrupt_occurred, 0); in bnx2x_sp_task()
5711 /* Prevent local bottom-halves from running as in bnx2x_sp_task()
5721 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, in bnx2x_sp_task()
5722 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1); in bnx2x_sp_task()
5729 DP(BNX2X_MSG_SP, in bnx2x_sp_task()
5733 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, in bnx2x_sp_task()
5734 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); in bnx2x_sp_task()
5737 /* afex - poll to check if VIFSET_ACK should be sent to MFW */ in bnx2x_sp_task()
5739 &bp->sp_state)) { in bnx2x_sp_task()
5750 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, in bnx2x_msix_sp_int()
5754 if (unlikely(bp->panic)) in bnx2x_msix_sp_int()
5762 c_ops = rcu_dereference(bp->cnic_ops); in bnx2x_msix_sp_int()
5764 c_ops->cnic_handler(bp->cnic_data, NULL); in bnx2x_msix_sp_int()
5781 bp->fw_drv_pulse_wr_seq); in bnx2x_drv_pulse()
5788 if (!netif_running(bp->dev)) in bnx2x_timer()
5797 ++bp->fw_drv_pulse_wr_seq; in bnx2x_timer()
5798 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; in bnx2x_timer()
5799 drv_pulse = bp->fw_drv_pulse_wr_seq; in bnx2x_timer()
5809 if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5) in bnx2x_timer()
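/* The masked subtraction above keeps the heartbeat check wrap-safe:
 * only the low sequence bits are compared, so a freshly wrapped driver
 * counter still yields a small delta. Illustrative arithmetic with an
 * assumed 16-bit mask of 0xffff:
 *
 *	(0x0002 - 0xfffe) & 0xffff == 0x0004
 *
 * A delta above 5 therefore points at an MCP that stopped echoing the
 * pulse, not at counter wrap-around.
 */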
5814 if (bp->state == BNX2X_STATE_OPEN) in bnx2x_timer()
5821 mod_timer(&bp->timer, jiffies + bp->current_interval); in bnx2x_timer()
5843 /* helper: writes FP SP data to FW - data_size in dwords */
5924 hc_sm->igu_sb_id = igu_sb_id; in bnx2x_setup_ndsb_state_machine()
5925 hc_sm->igu_seg_id = igu_seg_id; in bnx2x_setup_ndsb_state_machine()
5926 hc_sm->timer_value = 0xFF; in bnx2x_setup_ndsb_state_machine()
5927 hc_sm->time_to_expire = 0xFFFFFFFF; in bnx2x_setup_ndsb_state_machine()
6013 DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id); in bnx2x_init_sb()
6015 /* write indices to HW - PCI guarantees endianness of regpairs */ in bnx2x_init_sb()
6037 struct host_sp_status_block *def_sb = bp->def_status_blk; in bnx2x_init_def_sb()
6038 dma_addr_t mapping = bp->def_status_blk_mapping; in bnx2x_init_def_sb()
6053 igu_sp_sb_index = bp->igu_dsb_id; in bnx2x_init_def_sb()
6060 def_sb->atten_status_block.status_block_id = igu_sp_sb_index; in bnx2x_init_def_sb()
6062 bp->attn_state = 0; in bnx2x_init_def_sb()
6072 bp->attn_group[index].sig[sindex] = in bnx2x_init_def_sb()
6081 bp->attn_group[index].sig[4] = REG_RD(bp, in bnx2x_init_def_sb()
6084 bp->attn_group[index].sig[4] = 0; in bnx2x_init_def_sb()
6087 if (bp->common.int_block == INT_BLOCK_HC) { in bnx2x_init_def_sb()
6115 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); in bnx2x_init_def_sb()
6123 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id, in bnx2x_update_coalesce()
6124 bp->tx_ticks, bp->rx_ticks); in bnx2x_update_coalesce()
6129 spin_lock_init(&bp->spq_lock); in bnx2x_init_sp_ring()
6130 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING); in bnx2x_init_sp_ring()
6132 bp->spq_prod_idx = 0; in bnx2x_init_sp_ring()
6133 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX; in bnx2x_init_sp_ring()
6134 bp->spq_prod_bd = bp->spq; in bnx2x_init_sp_ring()
6135 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT; in bnx2x_init_sp_ring()
6143 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1]; in bnx2x_init_eq_ring()
6145 elem->next_page.addr.hi = in bnx2x_init_eq_ring()
6146 cpu_to_le32(U64_HI(bp->eq_mapping + in bnx2x_init_eq_ring()
6148 elem->next_page.addr.lo = in bnx2x_init_eq_ring()
6149 cpu_to_le32(U64_LO(bp->eq_mapping + in bnx2x_init_eq_ring()
6152 bp->eq_cons = 0; in bnx2x_init_eq_ring()
6153 bp->eq_prod = NUM_EQ_DESC; in bnx2x_init_eq_ring()
6154 bp->eq_cons_sb = BNX2X_EQ_INDEX; in bnx2x_init_eq_ring()
6156 atomic_set(&bp->eq_spq_left, in bnx2x_init_eq_ring()
6157 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1); in bnx2x_init_eq_ring()
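/* A worked example of the credit computed above, using illustrative
 * (not necessarily actual) values MAX_SP_DESC_CNT = 63,
 * MAX_SPQ_PENDING = 8 and NUM_EQ_DESC = 128:
 *
 *	min(63 - 8, 128) - 1 == 54
 *
 * i.e. the EQ never admits more outstanding slowpath events than the
 * SPQ can still absorb, minus one element kept in reserve.
 */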
6175 ramrod_param.rx_mode_obj = &bp->rx_mode_obj; in bnx2x_set_q_rx_mode()
6178 ramrod_param.pstate = &bp->sp_state; in bnx2x_set_q_rx_mode()
6184 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); in bnx2x_set_q_rx_mode()
6194 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode); in bnx2x_set_q_rx_mode()
6226 if (bp->accept_any_vlan) { in bnx2x_fill_accept_flags()
6242 if (bp->accept_any_vlan) { in bnx2x_fill_accept_flags()
6273 return -EINVAL; in bnx2x_fill_accept_flags()
6290 rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags, in bnx2x_set_storm_rx_mode()
6298 return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, in bnx2x_set_storm_rx_mode()
6344 return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp); in bnx2x_fp_igu_sb_id()
6349 return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp); in bnx2x_fp_fw_sb_id()
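/* Example of the per-fastpath numbering above, with assumed values
 * igu_base_sb = base_fw_ndsb = 16, CNIC_SUPPORT() = 1 and index = 3:
 * both ids evaluate to 16 + 3 + 1 == 20; the extra "+ 1" leaves one
 * status block slot free for CNIC.
 */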
6354 if (CHIP_IS_E1x(fp->bp)) in bnx2x_fp_cl_id()
6355 return BP_L_ID(fp->bp) + fp->index; in bnx2x_fp_cl_id()
6362 struct bnx2x_fastpath *fp = &bp->fp[fp_idx]; in bnx2x_init_eth_fp()
6366 fp->rx_queue = fp_idx; in bnx2x_init_eth_fp()
6367 fp->cid = fp_idx; in bnx2x_init_eth_fp()
6368 fp->cl_id = bnx2x_fp_cl_id(fp); in bnx2x_init_eth_fp()
6369 fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp); in bnx2x_init_eth_fp()
6370 fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp); in bnx2x_init_eth_fp()
6372 fp->cl_qzone_id = bnx2x_fp_qzone_id(fp); in bnx2x_init_eth_fp()
6375 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp); in bnx2x_init_eth_fp()
6378 fp->rx_cons_sb = BNX2X_RX_SB_INDEX; in bnx2x_init_eth_fp()
6384 BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS); in bnx2x_init_eth_fp()
6388 bnx2x_init_txdata(bp, fp->txdata_ptr[cos], in bnx2x_init_eth_fp()
6389 CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp), in bnx2x_init_eth_fp()
6392 cids[cos] = fp->txdata_ptr[cos]->cid; in bnx2x_init_eth_fp()
6399 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false, in bnx2x_init_eth_fp()
6400 fp->fw_sb_id, fp->igu_sb_id); in bnx2x_init_eth_fp()
6402 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids, in bnx2x_init_eth_fp()
6403 fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata), in bnx2x_init_eth_fp()
6411 DP(NETIF_MSG_IFUP, in bnx2x_init_eth_fp()
6413 fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, in bnx2x_init_eth_fp()
6414 fp->igu_sb_id); in bnx2x_init_eth_fp()
6423 &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd; in bnx2x_init_tx_ring_one()
6425 tx_next_bd->addr_hi = in bnx2x_init_tx_ring_one()
6426 cpu_to_le32(U64_HI(txdata->tx_desc_mapping + in bnx2x_init_tx_ring_one()
6428 tx_next_bd->addr_lo = in bnx2x_init_tx_ring_one()
6429 cpu_to_le32(U64_LO(txdata->tx_desc_mapping + in bnx2x_init_tx_ring_one()
6433 *txdata->tx_cons_sb = cpu_to_le16(0); in bnx2x_init_tx_ring_one()
6435 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); in bnx2x_init_tx_ring_one()
6436 txdata->tx_db.data.zero_fill1 = 0; in bnx2x_init_tx_ring_one()
6437 txdata->tx_db.data.prod = 0; in bnx2x_init_tx_ring_one()
6439 txdata->tx_pkt_prod = 0; in bnx2x_init_tx_ring_one()
6440 txdata->tx_pkt_cons = 0; in bnx2x_init_tx_ring_one()
6441 txdata->tx_bd_prod = 0; in bnx2x_init_tx_ring_one()
6442 txdata->tx_bd_cons = 0; in bnx2x_init_tx_ring_one()
6443 txdata->tx_pkt = 0; in bnx2x_init_tx_ring_one()
6451 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]); in bnx2x_init_tx_rings_cnic()
6460 for_each_cos_in_tx_queue(&bp->fp[i], cos) in bnx2x_init_tx_rings()
6461 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]); in bnx2x_init_tx_rings()
6474 bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; in bnx2x_init_fcoe_fp()
6477 fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX, in bnx2x_init_fcoe_fp()
6480 DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index); in bnx2x_init_fcoe_fp()
6492 /* No multi-CoS for FCoE L2 client */ in bnx2x_init_fcoe_fp()
6493 BUG_ON(fp->max_cos != 1); in bnx2x_init_fcoe_fp()
6495 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, in bnx2x_init_fcoe_fp()
6496 &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata), in bnx2x_init_fcoe_fp()
6499 DP(NETIF_MSG_IFUP, in bnx2x_init_fcoe_fp()
6501 fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, in bnx2x_init_fcoe_fp()
6502 fp->igu_sb_id); in bnx2x_init_fcoe_fp()
6510 bnx2x_init_sb(bp, bp->cnic_sb_mapping, in bnx2x_nic_init_cnic()
6538 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, in bnx2x_pre_irq_nic_init()
6539 bp->common.shmem_base, in bnx2x_pre_irq_nic_init()
6540 bp->common.shmem2_base, BP_PORT(bp)); in bnx2x_pre_irq_nic_init()
6572 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE, in bnx2x_gunzip_init()
6573 &bp->gunzip_mapping, GFP_KERNEL); in bnx2x_gunzip_init()
6574 if (bp->gunzip_buf == NULL) in bnx2x_gunzip_init()
6577 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL); in bnx2x_gunzip_init()
6578 if (bp->strm == NULL) in bnx2x_gunzip_init()
6581 bp->strm->workspace = vmalloc(zlib_inflate_workspacesize()); in bnx2x_gunzip_init()
6582 if (bp->strm->workspace == NULL) in bnx2x_gunzip_init()
6588 kfree(bp->strm); in bnx2x_gunzip_init()
6589 bp->strm = NULL; in bnx2x_gunzip_init()
6592 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, in bnx2x_gunzip_init()
6593 bp->gunzip_mapping); in bnx2x_gunzip_init()
6594 bp->gunzip_buf = NULL; in bnx2x_gunzip_init()
6597 BNX2X_ERR("Cannot allocate firmware buffer for decompression\n"); in bnx2x_gunzip_init()
6598 return -ENOMEM; in bnx2x_gunzip_init()
6603 if (bp->strm) { in bnx2x_gunzip_end()
6604 vfree(bp->strm->workspace); in bnx2x_gunzip_end()
6605 kfree(bp->strm); in bnx2x_gunzip_end()
6606 bp->strm = NULL; in bnx2x_gunzip_end()
6609 if (bp->gunzip_buf) { in bnx2x_gunzip_end()
6610 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, in bnx2x_gunzip_end()
6611 bp->gunzip_mapping); in bnx2x_gunzip_end()
6612 bp->gunzip_buf = NULL; in bnx2x_gunzip_end()
6623 return -EINVAL; in bnx2x_gunzip()
6633 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n; in bnx2x_gunzip()
6634 bp->strm->avail_in = len - n; in bnx2x_gunzip()
6635 bp->strm->next_out = bp->gunzip_buf; in bnx2x_gunzip()
6636 bp->strm->avail_out = FW_BUF_SIZE; in bnx2x_gunzip()
6638 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS); in bnx2x_gunzip()
6642 rc = zlib_inflate(bp->strm, Z_FINISH); in bnx2x_gunzip()
6644 netdev_err(bp->dev, "Firmware decompression error: %s\n", in bnx2x_gunzip()
6645 bp->strm->msg); in bnx2x_gunzip()
6647 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out); in bnx2x_gunzip()
6648 if (bp->gunzip_outlen & 0x3) in bnx2x_gunzip()
6649 netdev_err(bp->dev, in bnx2x_gunzip()
6651 bp->gunzip_outlen); in bnx2x_gunzip()
6652 bp->gunzip_outlen >>= 2; in bnx2x_gunzip()
6654 zlib_inflateEnd(bp->strm); in bnx2x_gunzip()
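/* The two checks above treat the decompressed firmware as a stream of
 * 32-bit words: the "& 0x3" test warns when the length is not a
 * multiple of 4 bytes, and ">>= 2" converts bytes into dwords, e.g.
 *
 *	gunzip_outlen = 4096 bytes  ->  4096 >> 2 == 1024 dwords
 */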
6679 /* NON-IP protocol */ in bnx2x_lb_pckt()
6726 count--; in bnx2x_int_mem_test()
6730 return -1; in bnx2x_int_mem_test()
6741 count--; in bnx2x_int_mem_test()
6745 return -2; in bnx2x_int_mem_test()
6756 DP(NETIF_MSG_HW, "part2\n"); in bnx2x_int_mem_test()
6782 count--; in bnx2x_int_mem_test()
6786 return -3; in bnx2x_int_mem_test()
6810 return -4; in bnx2x_int_mem_test()
6830 DP(NETIF_MSG_HW, "done\n"); in bnx2x_int_mem_test()
6913 bp->dmae_ready = 0; in bnx2x_setup_dmae()
6914 spin_lock_init(&bp->dmae_lock); in bnx2x_setup_dmae()
6922 pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl); in bnx2x_init_pxp()
6923 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl); in bnx2x_init_pxp()
6925 if (bp->mrrs == -1) in bnx2x_init_pxp()
6928 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs); in bnx2x_init_pxp()
6929 r_order = bp->mrrs; in bnx2x_init_pxp()
6961 bp->common.shmem_base, in bnx2x_setup_fan_failure_detection()
6962 bp->common.shmem2_base, in bnx2x_setup_fan_failure_detection()
6966 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required); in bnx2x_setup_fan_failure_detection()
7002 shmem_base[0] = bp->common.shmem_base; in bnx2x__common_init_phy()
7003 shmem2_base[0] = bp->common.shmem2_base; in bnx2x__common_init_phy()
7012 bp->common.chip_id); in bnx2x__common_init_phy()
7048 * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
7056 DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp)); in bnx2x_init_hw_common()
7082 * 4-port mode or 2-port mode we need to turn off master-enable in bnx2x_init_hw_common()
7084 * so, whether in multi-function mode or not, we always disable in bnx2x_init_hw_common()
7125 return -EBUSY; in bnx2x_init_hw_common()
7130 return -EBUSY; in bnx2x_init_hw_common()
7140 * (i.e. vnic3) to start even if it is marked as "scan-off". in bnx2x_init_hw_common()
7142 * as "scan-off". Real-life scenario for example: if a driver is being in bnx2x_init_hw_common()
7143 * load-unloaded while func6,7 are down. This will cause the timer to access in bnx2x_init_hw_common()
7158 * dmae-operations (writing to pram for example.) in bnx2x_init_hw_common()
7168 * b. Wait 20msec. - note that this timeout is needed to make in bnx2x_init_hw_common()
7199 * PF-s might be dynamic. in bnx2x_init_hw_common()
7208 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; in bnx2x_init_hw_common()
7245 } while (factor-- && (val != 1)); in bnx2x_init_hw_common()
7249 return -EBUSY; in bnx2x_init_hw_common()
7258 bp->dmae_ready = 1; in bnx2x_init_hw_common()
7277 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET); in bnx2x_init_hw_common()
7298 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan); in bnx2x_init_hw_common()
7311 /* Bit-map indicating which L2 hdrs may appear in bnx2x_init_hw_common()
7315 bp->path_has_ovlan ? 7 : 6); in bnx2x_init_hw_common()
7363 bp->path_has_ovlan ? 7 : 6); in bnx2x_init_hw_common()
7387 dev_alert(&bp->pdev->dev, in bnx2x_init_hw_common()
7431 /* in E3 this is done in per-port section */ in bnx2x_init_hw_common()
7446 return -EBUSY; in bnx2x_init_hw_common()
7451 return -EBUSY; in bnx2x_init_hw_common()
7456 return -EBUSY; in bnx2x_init_hw_common()
7469 return -EBUSY; in bnx2x_init_hw_common()
7485 BNX2X_ERR("Bootcode is missing - can not initialize link\n"); in bnx2x_init_hw_common()
7494 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
7505 /* In E2 2-PORT mode, same ext phy is used for the two paths */ in bnx2x_init_hw_common_chip()
7519 DP(NETIF_MSG_HW, "starting port init port %d\n", port); in bnx2x_init_hw_port()
7529 * attempted. Therefore we manually added the enable-master to the in bnx2x_init_hw_port()
7546 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET); in bnx2x_init_hw_port()
7561 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); in bnx2x_init_hw_port()
7562 else if (bp->dev->mtu > 4096) { in bnx2x_init_hw_port()
7563 if (bp->flags & ONE_PORT_FLAG) in bnx2x_init_hw_port()
7566 val = bp->dev->mtu; in bnx2x_init_hw_port()
7572 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160); in bnx2x_init_hw_port()
7597 /* Ovlan exists only if we are in multi-function + in bnx2x_init_hw_port()
7598 * switch-dependent mode, in switch-independent there in bnx2x_init_hw_port()
7604 (bp->path_has_ovlan ? 7 : 6)); in bnx2x_init_hw_port()
7630 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); in bnx2x_init_hw_port()
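/* For reference, the initial credit written above evaluates to
 * (9040 / 16) + 553 - 22 == 565 + 553 - 22 == 1096 units.
 */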
7654 * - SF mode: bits 3-7 are masked. Only bits 0-2 are in use in bnx2x_init_hw_port()
7655 * - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF in bnx2x_init_hw_port()
7656 * bits 4-7 are used for "per vn group attention" */ in bnx2x_init_hw_port()
7676 /* Bit-map indicating which L2 hdrs may appear after the in bnx2x_init_hw_port()
7704 switch (bp->mf_mode) { in bnx2x_init_hw_port()
7735 bp->flags |= PTP_SUPPORTED; in bnx2x_init_hw_port()
7778 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", in bnx2x_igu_clear_sb_gen()
7782 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", in bnx2x_igu_clear_sb_gen()
7788 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt) in bnx2x_igu_clear_sb_gen()
7792 DP(NETIF_MSG_HW, in bnx2x_igu_clear_sb_gen()
7813 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM); in bnx2x_init_searcher()
7829 func_params.f_obj = &bp->func_obj; in bnx2x_func_switch_update()
7834 &switch_update_params->changes); in bnx2x_func_switch_update()
7837 &switch_update_params->changes); in bnx2x_func_switch_update()
7850 if (bp->mf_mode == SINGLE_FUNCTION) { in bnx2x_reset_nic_mode()
7851 bnx2x_set_rx_filter(&bp->link_params, 0); in bnx2x_reset_nic_mode()
7880 BNX2X_ERR("Can't suspend tx-switching!\n"); in bnx2x_reset_nic_mode()
7888 if (bp->mf_mode == SINGLE_FUNCTION) { in bnx2x_reset_nic_mode()
7889 bnx2x_set_rx_filter(&bp->link_params, 1); in bnx2x_reset_nic_mode()
7908 BNX2X_ERR("Can't resume tx-switching!\n"); in bnx2x_reset_nic_mode()
7912 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n"); in bnx2x_reset_nic_mode()
7936 /* previous driver DMAE transaction may have occurred when pre-boot stage ended
7938 * the addresses of the transaction, resulting in the was-error bit being set in the PCI,
7939 * causing all hw-to-host pcie transactions to timeout. If this happened we want
7961 DP(NETIF_MSG_HW, "starting func init func %d\n", func); in bnx2x_init_hw_func()
7963 /* FLR cleanup - hmmm */ in bnx2x_init_hw_func()
7973 if (bp->common.int_block == INT_BLOCK_HC) { in bnx2x_init_hw_func()
7984 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; in bnx2x_init_hw_func()
7993 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; in bnx2x_init_hw_func()
7995 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt; in bnx2x_init_hw_func()
7996 ilt->lines[cdu_ilt_start + i].page_mapping = in bnx2x_init_hw_func()
7997 bp->context[i].cxt_mapping; in bnx2x_init_hw_func()
7998 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size; in bnx2x_init_hw_func()
8006 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n"); in bnx2x_init_hw_func()
8010 DP(NETIF_MSG_IFUP, "NIC MODE configured\n"); in bnx2x_init_hw_func()
8019 if (!(bp->flags & USING_MSIX_FLAG)) in bnx2x_init_hw_func()
8029 * Master enable - Due to WB DMAE writes performed before this in bnx2x_init_hw_func()
8030 * register is re-initialized as part of the regular function in bnx2x_init_hw_func()
8038 bp->dmae_ready = 1; in bnx2x_init_hw_func()
8098 bp->mf_ov); in bnx2x_init_hw_func()
8105 if (bp->common.int_block == INT_BLOCK_HC) { in bnx2x_init_hw_func()
8130 * E2 mode: address 0-135 match to the mapping memory; in bnx2x_init_hw_func()
8131 * 136 - PF0 default prod; 137 - PF1 default prod; in bnx2x_init_hw_func()
8132 * 138 - PF2 default prod; 139 - PF3 default prod; in bnx2x_init_hw_func()
8133 * 140 - PF0 attn prod; 141 - PF1 attn prod; in bnx2x_init_hw_func()
8134 * 142 - PF2 attn prod; 143 - PF3 attn prod; in bnx2x_init_hw_func()
8135 * 144-147 reserved. in bnx2x_init_hw_func()
8137 * E1.5 mode - In backward compatible mode; in bnx2x_init_hw_func()
8141 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 in bnx2x_init_hw_func()
8144 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; in bnx2x_init_hw_func()
8145 * 132-135 C prods; 136-139 X prods; 140-143 T prods; in bnx2x_init_hw_func()
8146 * 144-147 attn prods; in bnx2x_init_hw_func()
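/* A rough sketch of how a default-SB producer offset could follow from
 * the two layouts described above (the base constants and the
 * backward-compatibility test are illustrative assumptions):
 *
 *	dsb_idx = backward_compat ? func_id : vn_id;
 *	prod_offset = (backward_compat ? IGU_BC_BASE_DSB_PROD :
 *		       IGU_NORM_BASE_DSB_PROD) + dsb_idx;
 */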
8148 /* non-default-status-blocks */ in bnx2x_init_hw_func()
8151 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) { in bnx2x_init_hw_func()
8152 prod_offset = (bp->igu_base_sb + sb_idx) * in bnx2x_init_hw_func()
8161 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx, in bnx2x_init_hw_func()
8164 bp->igu_base_sb + sb_idx); in bnx2x_init_hw_func()
8167 /* default-status-blocks */ in bnx2x_init_hw_func()
8181 * igu prods come in chunks of E1HVN_MAX (4) - in bnx2x_init_hw_func()
8192 bnx2x_ack_sb(bp, bp->igu_dsb_id, in bnx2x_init_hw_func()
8194 bnx2x_ack_sb(bp, bp->igu_dsb_id, in bnx2x_init_hw_func()
8196 bnx2x_ack_sb(bp, bp->igu_dsb_id, in bnx2x_init_hw_func()
8198 bnx2x_ack_sb(bp, bp->igu_dsb_id, in bnx2x_init_hw_func()
8200 bnx2x_ack_sb(bp, bp->igu_dsb_id, in bnx2x_init_hw_func()
8203 bnx2x_ack_sb(bp, bp->igu_dsb_id, in bnx2x_init_hw_func()
8205 bnx2x_ack_sb(bp, bp->igu_dsb_id, in bnx2x_init_hw_func()
8208 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id); in bnx2x_init_hw_func()
8211 rf-tool supports split-68 const */ in bnx2x_init_hw_func()
8234 DP(NETIF_MSG_HW, in bnx2x_init_hw_func()
8238 /* Clear "false" parity errors in MSI-X table */ in bnx2x_init_hw_func()
8262 bnx2x_phy_probe(&bp->link_params); in bnx2x_init_hw_func()
8272 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping, in bnx2x_free_mem_cnic()
8275 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping, in bnx2x_free_mem_cnic()
8278 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); in bnx2x_free_mem_cnic()
8285 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, in bnx2x_free_mem()
8286 bp->fw_stats_data_sz + bp->fw_stats_req_sz); in bnx2x_free_mem()
8291 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, in bnx2x_free_mem()
8294 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, in bnx2x_free_mem()
8298 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping, in bnx2x_free_mem()
8299 bp->context[i].size); in bnx2x_free_mem()
8302 BNX2X_FREE(bp->ilt->lines); in bnx2x_free_mem()
8304 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); in bnx2x_free_mem()
8306 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, in bnx2x_free_mem()
8309 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); in bnx2x_free_mem()
8318 bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping, in bnx2x_alloc_mem_cnic()
8320 if (!bp->cnic_sb.e2_sb) in bnx2x_alloc_mem_cnic()
8323 bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping, in bnx2x_alloc_mem_cnic()
8325 if (!bp->cnic_sb.e1x_sb) in bnx2x_alloc_mem_cnic()
8329 if (CONFIGURE_NIC_MODE(bp) && !bp->t2) { in bnx2x_alloc_mem_cnic()
8331 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ); in bnx2x_alloc_mem_cnic()
8332 if (!bp->t2) in bnx2x_alloc_mem_cnic()
8337 bp->cnic_eth_dev.addr_drv_info_to_mcp = in bnx2x_alloc_mem_cnic()
8338 &bp->slowpath->drv_info_to_mcp; in bnx2x_alloc_mem_cnic()
8348 return -ENOMEM; in bnx2x_alloc_mem_cnic()
8355 if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) { in bnx2x_alloc_mem()
8357 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ); in bnx2x_alloc_mem()
8358 if (!bp->t2) in bnx2x_alloc_mem()
8362 bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping, in bnx2x_alloc_mem()
8364 if (!bp->def_status_blk) in bnx2x_alloc_mem()
8367 bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping, in bnx2x_alloc_mem()
8369 if (!bp->slowpath) in bnx2x_alloc_mem()
8375 * 1. There are multiple entities allocating memory for context - in bnx2x_alloc_mem()
8378 * 2. Since CDU page-size is not a single 4KB page (which is the case in bnx2x_alloc_mem()
8380 * allocation of sub-page-size in the last entry. in bnx2x_alloc_mem()
8388 bp->context[i].size = min(CDU_ILT_PAGE_SZ, in bnx2x_alloc_mem()
8389 (context_size - allocated)); in bnx2x_alloc_mem()
8390 bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping, in bnx2x_alloc_mem()
8391 bp->context[i].size); in bnx2x_alloc_mem()
8392 if (!bp->context[i].vcxt) in bnx2x_alloc_mem()
8394 allocated += bp->context[i].size; in bnx2x_alloc_mem()
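/* Worked example of the split above, assuming CDU_ILT_PAGE_SZ = 32 KiB
 * and context_size = 80 KiB: the loop allocates 32 KiB + 32 KiB +
 * 16 KiB, so only the last chunk is a sub-page allocation.
 */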
8396 bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line), in bnx2x_alloc_mem()
8398 if (!bp->ilt->lines) in bnx2x_alloc_mem()
8408 bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE); in bnx2x_alloc_mem()
8409 if (!bp->spq) in bnx2x_alloc_mem()
8413 bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping, in bnx2x_alloc_mem()
8415 if (!bp->eq_ring) in bnx2x_alloc_mem()
8423 return -ENOMEM; in bnx2x_alloc_mem()
8458 if (rc == -EEXIST) { in bnx2x_set_mac_one()
8459 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc); in bnx2x_set_mac_one()
8494 if (rc == -EEXIST) { in bnx2x_set_vlan_one()
8496 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc); in bnx2x_set_vlan_one()
8510 list_for_each_entry(vlan, &bp->vlan_reg, link) in bnx2x_clear_vlan_info()
8511 vlan->hw = false; in bnx2x_clear_vlan_info()
8513 bp->vlan_cnt = 0; in bnx2x_clear_vlan_info()
8518 struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj; in bnx2x_del_all_vlans()
8524 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_flags, &ramrod_flags); in bnx2x_del_all_vlans()
8547 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags); in bnx2x_del_all_macs()
8559 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n"); in bnx2x_set_eth_mac()
8561 return bnx2x_set_mac_one(bp, bp->dev->dev_addr, in bnx2x_set_eth_mac()
8562 &bp->sp_objs->mac_obj, set, in bnx2x_set_eth_mac()
8565 return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, in bnx2x_set_eth_mac()
8566 bp->fp->index, set); in bnx2x_set_eth_mac()
8573 return bnx2x_setup_queue(bp, &bp->fp[0], true); in bnx2x_setup_leading()
8575 return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true); in bnx2x_setup_leading()
8579 * bnx2x_set_int_mode - configure interrupt mode
8583 * In case of MSI-X it will also try to enable MSI-X.
8591 return -EINVAL; in bnx2x_set_int_mode()
8607 /* failed to enable multiple MSI-X */ in bnx2x_set_int_mode()
8608 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n", in bnx2x_set_int_mode()
8609 bp->num_queues, in bnx2x_set_int_mode()
8610 1 + bp->num_cnic_queues); in bnx2x_set_int_mode()
8618 bp->num_ethernet_queues = 1; in bnx2x_set_int_mode()
8619 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; in bnx2x_set_int_mode()
8624 return -EINVAL; in bnx2x_set_int_mode()
8643 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp)); in bnx2x_ilt_set_info()
8644 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line); in bnx2x_ilt_set_info()
8647 ilt_client = &ilt->clients[ILT_CLIENT_CDU]; in bnx2x_ilt_set_info()
8648 ilt_client->client_num = ILT_CLIENT_CDU; in bnx2x_ilt_set_info()
8649 ilt_client->page_size = CDU_ILT_PAGE_SZ; in bnx2x_ilt_set_info()
8650 ilt_client->flags = ILT_CLIENT_SKIP_MEM; in bnx2x_ilt_set_info()
8651 ilt_client->start = line; in bnx2x_ilt_set_info()
8656 ilt_client->end = line - 1; in bnx2x_ilt_set_info()
8658 DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", in bnx2x_ilt_set_info()
8659 ilt_client->start, in bnx2x_ilt_set_info()
8660 ilt_client->end, in bnx2x_ilt_set_info()
8661 ilt_client->page_size, in bnx2x_ilt_set_info()
8662 ilt_client->flags, in bnx2x_ilt_set_info()
8663 ilog2(ilt_client->page_size >> 12)); in bnx2x_ilt_set_info()
8666 if (QM_INIT(bp->qm_cid_count)) { in bnx2x_ilt_set_info()
8667 ilt_client = &ilt->clients[ILT_CLIENT_QM]; in bnx2x_ilt_set_info()
8668 ilt_client->client_num = ILT_CLIENT_QM; in bnx2x_ilt_set_info()
8669 ilt_client->page_size = QM_ILT_PAGE_SZ; in bnx2x_ilt_set_info()
8670 ilt_client->flags = 0; in bnx2x_ilt_set_info()
8671 ilt_client->start = line; in bnx2x_ilt_set_info()
8674 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4, in bnx2x_ilt_set_info()
8677 ilt_client->end = line - 1; in bnx2x_ilt_set_info()
8679 DP(NETIF_MSG_IFUP, in bnx2x_ilt_set_info()
8681 ilt_client->start, in bnx2x_ilt_set_info()
8682 ilt_client->end, in bnx2x_ilt_set_info()
8683 ilt_client->page_size, in bnx2x_ilt_set_info()
8684 ilt_client->flags, in bnx2x_ilt_set_info()
8685 ilog2(ilt_client->page_size >> 12)); in bnx2x_ilt_set_info()
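/* Illustrative line count for the QM client above, with assumed values
 * qm_cid_count = 256, QM_QUEUES_PER_FUNC = 8 and QM_ILT_PAGE_SZ = 4096:
 *
 *	DIV_ROUND_UP(256 * 8 * 4, 4096) == 2 ILT lines
 */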
8690 ilt_client = &ilt->clients[ILT_CLIENT_SRC]; in bnx2x_ilt_set_info()
8691 ilt_client->client_num = ILT_CLIENT_SRC; in bnx2x_ilt_set_info()
8692 ilt_client->page_size = SRC_ILT_PAGE_SZ; in bnx2x_ilt_set_info()
8693 ilt_client->flags = 0; in bnx2x_ilt_set_info()
8694 ilt_client->start = line; in bnx2x_ilt_set_info()
8696 ilt_client->end = line - 1; in bnx2x_ilt_set_info()
8698 DP(NETIF_MSG_IFUP, in bnx2x_ilt_set_info()
8700 ilt_client->start, in bnx2x_ilt_set_info()
8701 ilt_client->end, in bnx2x_ilt_set_info()
8702 ilt_client->page_size, in bnx2x_ilt_set_info()
8703 ilt_client->flags, in bnx2x_ilt_set_info()
8704 ilog2(ilt_client->page_size >> 12)); in bnx2x_ilt_set_info()
8707 ilt_client = &ilt->clients[ILT_CLIENT_TM]; in bnx2x_ilt_set_info()
8708 ilt_client->client_num = ILT_CLIENT_TM; in bnx2x_ilt_set_info()
8709 ilt_client->page_size = TM_ILT_PAGE_SZ; in bnx2x_ilt_set_info()
8710 ilt_client->flags = 0; in bnx2x_ilt_set_info()
8711 ilt_client->start = line; in bnx2x_ilt_set_info()
8713 ilt_client->end = line - 1; in bnx2x_ilt_set_info()
8715 DP(NETIF_MSG_IFUP, in bnx2x_ilt_set_info()
8717 ilt_client->start, in bnx2x_ilt_set_info()
8718 ilt_client->end, in bnx2x_ilt_set_info()
8719 ilt_client->page_size, in bnx2x_ilt_set_info()
8720 ilt_client->flags, in bnx2x_ilt_set_info()
8721 ilog2(ilt_client->page_size >> 12)); in bnx2x_ilt_set_info()
8728 * bnx2x_pf_q_prep_init - prepare INIT transition parameters
8735 * - HC configuration
8736 * - Queue's CDU context
8746 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags); in bnx2x_pf_q_prep_init()
8747 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags); in bnx2x_pf_q_prep_init()
8752 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags); in bnx2x_pf_q_prep_init()
8753 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags); in bnx2x_pf_q_prep_init()
8756 init_params->rx.hc_rate = bp->rx_ticks ? in bnx2x_pf_q_prep_init()
8757 (1000000 / bp->rx_ticks) : 0; in bnx2x_pf_q_prep_init()
8758 init_params->tx.hc_rate = bp->tx_ticks ? in bnx2x_pf_q_prep_init()
8759 (1000000 / bp->tx_ticks) : 0; in bnx2x_pf_q_prep_init()
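/* The HC rate above turns a coalescing interval into a per-second
 * event ceiling; e.g. with an assumed rx_ticks of 50 (usec):
 *
 *	1000000 / 50 == 20000 interrupts/sec
 */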
8762 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = in bnx2x_pf_q_prep_init()
8763 fp->fw_sb_id; in bnx2x_pf_q_prep_init()
8769 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; in bnx2x_pf_q_prep_init()
8770 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; in bnx2x_pf_q_prep_init()
8774 init_params->max_cos = fp->max_cos; in bnx2x_pf_q_prep_init()
8776 DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n", in bnx2x_pf_q_prep_init()
8777 fp->index, init_params->max_cos); in bnx2x_pf_q_prep_init()
8780 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { in bnx2x_pf_q_prep_init()
8781 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS; in bnx2x_pf_q_prep_init()
8782 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index * in bnx2x_pf_q_prep_init()
8784 init_params->cxts[cos] = in bnx2x_pf_q_prep_init()
8785 &bp->context[cxt_index].vcxt[cxt_offset].eth; in bnx2x_pf_q_prep_init()
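/* The index/offset split above locates a CID inside the per-page
 * context arrays; e.g. with an assumed ILT_PAGE_CIDS of 64 and
 * cid = 70:
 *
 *	cxt_index  = 70 / 64         == 1
 *	cxt_offset = 70 - (1 * 64)   == 6
 */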
8797 q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY; in bnx2x_setup_tx_only()
8799 /* Set tx-only QUEUE flags: don't zero statistics */ in bnx2x_setup_tx_only()
8800 tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false); in bnx2x_setup_tx_only()
8803 tx_only_params->cid_index = tx_index; in bnx2x_setup_tx_only()
8806 bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index); in bnx2x_setup_tx_only()
8809 bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index); in bnx2x_setup_tx_only()
8811 DP(NETIF_MSG_IFUP, in bnx2x_setup_tx_only()
8812 …"preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp in bnx2x_setup_tx_only()
8813 tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX], in bnx2x_setup_tx_only()
8814 q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id, in bnx2x_setup_tx_only()
8815 tx_only_params->gen_params.spcl_id, tx_only_params->flags); in bnx2x_setup_tx_only()
8822 * bnx2x_setup_queue - setup queue
8829 * actually: 1) RESET->INIT 2) INIT->SETUP
8843 DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index); in bnx2x_setup_queue()
8847 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, in bnx2x_setup_queue()
8863 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index); in bnx2x_setup_queue()
8867 DP(NETIF_MSG_IFUP, "init complete\n"); in bnx2x_setup_queue()
8873 setup_params->flags = bnx2x_get_q_flags(bp, fp, leading); in bnx2x_setup_queue()
8876 bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params, in bnx2x_setup_queue()
8879 bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params, in bnx2x_setup_queue()
8880 &setup_params->rxq_params); in bnx2x_setup_queue()
8882 bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params, in bnx2x_setup_queue()
8889 bp->fcoe_init = true; in bnx2x_setup_queue()
8894 BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index); in bnx2x_setup_queue()
8898 /* loop through the relevant tx-only indices */ in bnx2x_setup_queue()
8900 tx_index < fp->max_cos; in bnx2x_setup_queue()
8903 /* prepare and send tx-only ramrod*/ in bnx2x_setup_queue()
8908 fp->index, tx_index); in bnx2x_setup_queue()
8918 struct bnx2x_fastpath *fp = &bp->fp[index]; in bnx2x_stop_queue()
8923 DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid); in bnx2x_stop_queue()
8929 /* close tx-only connections */ in bnx2x_stop_queue()
8931 tx_index < fp->max_cos; in bnx2x_stop_queue()
8935 txdata = fp->txdata_ptr[tx_index]; in bnx2x_stop_queue()
8937 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n", in bnx2x_stop_queue()
8938 txdata->txq_index); in bnx2x_stop_queue()
8940 /* send halt terminate on tx-only connection */ in bnx2x_stop_queue()
8950 /* send halt terminate on tx-only connection */ in bnx2x_stop_queue()
8996 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_reset_func()
8998 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), in bnx2x_reset_func()
9018 if (bp->common.int_block == INT_BLOCK_HC) { in bnx2x_reset_func()
9042 /* Timers workaround bug for E2: if this is vnic-3, in bnx2x_reset_func()
9050 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; in bnx2x_reset_func()
9060 bp->dmae_ready = 0; in bnx2x_reset_func()
9086 DP(NETIF_MSG_IFDOWN, in bnx2x_reset_port()
9099 func_params.f_obj = &bp->func_obj; in bnx2x_reset_hw()
9114 func_params.f_obj = &bp->func_obj; in bnx2x_func_stop()
9138 * bnx2x_send_unload_req - request unload mode from the MCP.
9154 else if (bp->flags & NO_WOL_FLAG) in bnx2x_send_unload_req()
9157 else if (bp->wol) { in bnx2x_send_unload_req()
9159 const u8 *mac_addr = bp->dev->dev_addr; in bnx2x_send_unload_req()
9160 struct pci_dev *pdev = bp->pdev; in bnx2x_send_unload_req()
9164 /* The mac address is written to entries 1-4 to in bnx2x_send_unload_req()
9177 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc); in bnx2x_send_unload_req()
9179 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc); in bnx2x_send_unload_req()
9192 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n", in bnx2x_send_unload_req()
9195 bnx2x_load_count[path][0]--; in bnx2x_send_unload_req()
9196 bnx2x_load_count[path][1 + port]--; in bnx2x_send_unload_req()
9197 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n", in bnx2x_send_unload_req()
9212 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
9229 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; in bnx2x_func_wait_started()
9231 if (!bp->port.pmf) in bnx2x_func_wait_started()
9238 * 2. Sync SP queue - this guarantees us that attention handling started in bnx2x_func_wait_started()
9242 * pending bit of transaction from STARTED-->TX_STOPPED, if we already in bnx2x_func_wait_started()
9244 * State will return to STARTED after completion of TX_STOPPED-->STARTED in bnx2x_func_wait_started()
9250 synchronize_irq(bp->msix_table[0].vector); in bnx2x_func_wait_started()
9252 synchronize_irq(bp->pdev->irq); in bnx2x_func_wait_started()
9257 while (bnx2x_func_get_state(bp, &bp->func_obj) != in bnx2x_func_wait_started()
9258 BNX2X_F_STATE_STARTED && tout--) in bnx2x_func_wait_started()
9261 if (bnx2x_func_get_state(bp, &bp->func_obj) != in bnx2x_func_wait_started()
9265 return -EBUSY; in bnx2x_func_wait_started()
9273 DP(NETIF_MSG_IFDOWN, in bnx2x_func_wait_started()
9274 "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n"); in bnx2x_func_wait_started()
9276 func_params.f_obj = &bp->func_obj; in bnx2x_func_wait_started()
9280 /* STARTED-->TX_STOPPED */ in bnx2x_func_wait_started()
9284 /* TX_STOPPED-->STARTED */ in bnx2x_func_wait_started()
9316 /* Called during unload, to stop PTP-related stuff */
9322 cancel_work_sync(&bp->ptp_task); in bnx2x_stop_ptp()
9324 if (bp->ptp_tx_skb) { in bnx2x_stop_ptp()
9325 dev_kfree_skb_any(bp->ptp_tx_skb); in bnx2x_stop_ptp()
9326 bp->ptp_tx_skb = NULL; in bnx2x_stop_ptp()
9332 DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n"); in bnx2x_stop_ptp()
9345 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_chip_cleanup()
9348 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]); in bnx2x_chip_cleanup()
9359 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC, in bnx2x_chip_cleanup()
9365 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC, in bnx2x_chip_cleanup()
9390 netif_addr_lock_bh(bp->dev); in bnx2x_chip_cleanup()
9392 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) in bnx2x_chip_cleanup()
9393 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); in bnx2x_chip_cleanup()
9394 else if (bp->slowpath) in bnx2x_chip_cleanup()
9398 rparam.mcast_obj = &bp->mcast_obj; in bnx2x_chip_cleanup()
9403 netif_addr_unlock_bh(bp->dev); in bnx2x_chip_cleanup()
9447 /* If SP settings didn't get completed so far - something in bnx2x_chip_cleanup()
9469 if (bp->flags & PTP_SUPPORTED) { in bnx2x_chip_cleanup()
9471 if (bp->ptp_clock) { in bnx2x_chip_cleanup()
9472 ptp_clock_unregister(bp->ptp_clock); in bnx2x_chip_cleanup()
9473 bp->ptp_clock = NULL; in bnx2x_chip_cleanup()
9477 if (!bp->nic_stopped) { in bnx2x_chip_cleanup()
9487 bp->nic_stopped = true; in bnx2x_chip_cleanup()
9495 if (!pci_channel_offline(bp->pdev)) { in bnx2x_chip_cleanup()
9509 DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n"); in bnx2x_disable_close_the_gate()
9562 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n", in bnx2x_set_234_gates()
9577 * bnx2x_clp_reset_done - restore the value of the `magic' bit.
9591 * bnx2x_reset_mcp_prep - prepare for MCP reset.
9603 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n"); in bnx2x_reset_mcp_prep()
9623 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
9638 * initializes bp->common.shmem_base and waits for validity signature to appear
9646 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); in bnx2x_init_shmem()
9651 if (bp->common.shmem_base == 0xFFFFFFFF) { in bnx2x_init_shmem()
9652 bp->flags |= NO_MCP_FLAG; in bnx2x_init_shmem()
9653 return -ENODEV; in bnx2x_init_shmem()
9656 if (bp->common.shmem_base) { in bnx2x_init_shmem()
9668 return -ENODEV; in bnx2x_init_shmem()
9692 * - PCIE core
9693 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
9695 * - IGU
9696 * - MISC (including AEU)
9697 * - GRC
9698 * - RBCN, RBCP
9743 * - all xxMACs are handled by the bnx2x_link code. in bnx2x_process_kill_chip_reset()
9796 * bnx2x_er_poll_igu_vq - poll for pending writes bit.
9816 } while (cnt-- > 0); in bnx2x_er_poll_igu_vq()
9821 return -EBUSY; in bnx2x_er_poll_igu_vq()
9851 } while (cnt-- > 0); in bnx2x_process_kill()
9858 return -EAGAIN; in bnx2x_process_kill()
9868 return -EAGAIN; in bnx2x_process_kill()
9876 /* Wait for 1ms to empty GLUE and PCI-E core queues, in bnx2x_process_kill()
9901 return -EAGAIN; in bnx2x_process_kill()
9909 * reset state, re-enable attentions. */ in bnx2x_process_kill()
9920 /* if not going to reset MCP - load "fake" driver to reset HW while in bnx2x_leader_reset()
9928 rc = -EAGAIN; in bnx2x_leader_reset()
9934 rc = -EAGAIN; in bnx2x_leader_reset()
9940 rc = -EAGAIN; in bnx2x_leader_reset()
9949 rc = -EAGAIN; in bnx2x_leader_reset()
9968 bp->is_leader = 0; in bnx2x_leader_reset()
9976 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n"); in bnx2x_recovery_failed()
9979 netif_device_detach(bp->dev); in bnx2x_recovery_failed()
9990 bp->recovery_state = BNX2X_RECOVERY_FAILED; in bnx2x_recovery_failed()
9998 * will never be called when netif_running(bp->dev) is false.
10007 for (vf_idx = 0; vf_idx < bp->requested_nr_virtfn; vf_idx++) { in bnx2x_parity_recover()
10011 vf->state = VF_LOST; in bnx2x_parity_recover()
10014 DP(NETIF_MSG_HW, "Handling parity\n"); in bnx2x_parity_recover()
10016 switch (bp->recovery_state) { in bnx2x_parity_recover()
10018 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n"); in bnx2x_parity_recover()
10034 bp->is_leader = 1; in bnx2x_parity_recover()
10038 /* If interface has been removed - break */ in bnx2x_parity_recover()
10042 bp->recovery_state = BNX2X_RECOVERY_WAIT; in bnx2x_parity_recover()
10052 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n"); in bnx2x_parity_recover()
10053 if (bp->is_leader) { in bnx2x_parity_recover()
10074 schedule_delayed_work(&bp->sp_rtnl_task, in bnx2x_parity_recover()
10078 /* If all other functions got down - in bnx2x_parity_recover()
10091 * to continue as a non-leader. in bnx2x_parity_recover()
10095 } else { /* non-leader */ in bnx2x_parity_recover()
10107 bp->is_leader = 1; in bnx2x_parity_recover()
10111 schedule_delayed_work(&bp->sp_rtnl_task, in bnx2x_parity_recover()
10122 &bp->sp_rtnl_task, in bnx2x_parity_recover()
10128 bp->eth_stats.recoverable_error; in bnx2x_parity_recover()
10130 bp->eth_stats.unrecoverable_error; in bnx2x_parity_recover()
10131 bp->recovery_state = in bnx2x_parity_recover()
10135 netdev_err(bp->dev, in bnx2x_parity_recover()
10138 netif_device_detach(bp->dev); in bnx2x_parity_recover()
10144 bp->recovery_state = in bnx2x_parity_recover()
10149 bp->eth_stats.recoverable_error = in bnx2x_parity_recover()
10151 bp->eth_stats.unrecoverable_error = in bnx2x_parity_recover()
10176 func_params.f_obj = &bp->func_obj; in bnx2x_udp_port_update()
10181 &switch_update_params->changes); in bnx2x_udp_port_update()
10183 if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]) { in bnx2x_udp_port_update()
10184 geneve_port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]; in bnx2x_udp_port_update()
10185 switch_update_params->geneve_dst_port = geneve_port; in bnx2x_udp_port_update()
10188 if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]) { in bnx2x_udp_port_update()
10189 vxlan_port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]; in bnx2x_udp_port_update()
10190 switch_update_params->vxlan_dst_port = vxlan_port; in bnx2x_udp_port_update()
10193 /* Re-enable inner-rss for the offloaded UDP tunnels */ in bnx2x_udp_port_update()
10195 &switch_update_params->changes); in bnx2x_udp_port_update()
10202 DP(BNX2X_MSG_SP, in bnx2x_udp_port_update()
10215 bp->udp_tunnel_ports[table] = be16_to_cpu(ti.port); in bnx2x_udp_tunnel_sync()
10241 if (!netif_running(bp->dev)) { in bnx2x_sp_rtnl_task()
10246 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) { in bnx2x_sp_rtnl_task()
10256 bp->sp_rtnl_state = 0; in bnx2x_sp_rtnl_task()
10265 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) { in bnx2x_sp_rtnl_task()
10276 bp->sp_rtnl_state = 0; in bnx2x_sp_rtnl_task()
10280 bp->link_vars.link_up = 0; in bnx2x_sp_rtnl_task()
10281 bp->force_link_down = true; in bnx2x_sp_rtnl_task()
10282 netif_carrier_off(bp->dev); in bnx2x_sp_rtnl_task()
10283 BNX2X_ERR("Indicating link is down due to Tx-timeout\n"); in bnx2x_sp_rtnl_task()
10290 if (bnx2x_nic_load(bp, LOAD_NORMAL) == -ENOMEM) { in bnx2x_sp_rtnl_task()
10301 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) in bnx2x_sp_rtnl_task()
10302 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos); in bnx2x_sp_rtnl_task()
10303 if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state)) in bnx2x_sp_rtnl_task()
10310 if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) { in bnx2x_sp_rtnl_task()
10311 DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n"); in bnx2x_sp_rtnl_task()
10312 netif_device_detach(bp->dev); in bnx2x_sp_rtnl_task()
10313 bnx2x_close(bp->dev); in bnx2x_sp_rtnl_task()
10318 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) { in bnx2x_sp_rtnl_task()
10319 DP(BNX2X_MSG_SP, in bnx2x_sp_rtnl_task()
10320 "sending set mcast vf pf channel message from rtnl sp-task\n"); in bnx2x_sp_rtnl_task()
10321 bnx2x_vfpf_set_mcast(bp->dev); in bnx2x_sp_rtnl_task()
10324 &bp->sp_rtnl_state)){ in bnx2x_sp_rtnl_task()
10325 if (netif_carrier_ok(bp->dev)) { in bnx2x_sp_rtnl_task()
10331 if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) { in bnx2x_sp_rtnl_task()
10332 DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n"); in bnx2x_sp_rtnl_task()
10337 &bp->sp_rtnl_state)) in bnx2x_sp_rtnl_task()
10340 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) { in bnx2x_sp_rtnl_task()
10346 &bp->sp_rtnl_state)) in bnx2x_sp_rtnl_task()
10349 if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state)) in bnx2x_sp_rtnl_task()
10352 /* work which needs rtnl lock not-taken (as it takes the lock itself and in bnx2x_sp_rtnl_task()
10357 /* enable SR-IOV if applicable */ in bnx2x_sp_rtnl_task()
10359 &bp->sp_rtnl_state)) { in bnx2x_sp_rtnl_task()
10369 if (!netif_running(bp->dev)) in bnx2x_period_task()
10380 * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and in bnx2x_period_task()
10384 if (bp->port.pmf) { in bnx2x_period_task()
10385 bnx2x_period_func(&bp->link_params, &bp->link_vars); in bnx2x_period_task()
10387 /* Re-queue task in 1 sec */ in bnx2x_period_task()
10388 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ); in bnx2x_period_task()
10403 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base; in bnx2x_get_pretend_reg()
10419 vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG; in bnx2x_prev_unload_close_umac()
10420 vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]); in bnx2x_prev_unload_close_umac()
10421 REG_WR(bp, vals->umac_addr[port], 0); in bnx2x_prev_unload_close_umac()
10457 vals->bmac_addr = base_addr + offset; in bnx2x_prev_unload_close_mac()
10458 vals->bmac_val[0] = wb_data[0]; in bnx2x_prev_unload_close_mac()
10459 vals->bmac_val[1] = wb_data[1]; in bnx2x_prev_unload_close_mac()
10461 REG_WR(bp, vals->bmac_addr, wb_data[0]); in bnx2x_prev_unload_close_mac()
10462 REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]); in bnx2x_prev_unload_close_mac()
10465 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4; in bnx2x_prev_unload_close_mac()
10466 vals->emac_val = REG_RD(bp, vals->emac_addr); in bnx2x_prev_unload_close_mac()
10467 REG_WR(bp, vals->emac_addr, 0); in bnx2x_prev_unload_close_mac()
10478 vals->xmac_addr = base_addr + XMAC_REG_CTRL; in bnx2x_prev_unload_close_mac()
10479 vals->xmac_val = REG_RD(bp, vals->xmac_addr); in bnx2x_prev_unload_close_mac()
10480 REG_WR(bp, vals->xmac_addr, 0); in bnx2x_prev_unload_close_mac()
10507 /* UNDI marks its presence in DORQ - in bnx2x_prev_is_after_undi()
10530 addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2); in bnx2x_prev_unload_undi_inc()
10539 BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n", in bnx2x_prev_unload_undi_inc()
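/* The bd and rcq producers logged above share one 32-bit register,
 * 16 bits each (which half holds which is an assumption here, inferred
 * only from the log format). A minimal sketch of the packing:
 *
 *	rcq = reg & 0xffff;
 *	bd  = (reg >> 16) & 0xffff;
 *	reg = (bd << 16) | rcq;
 */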
10549 return -EBUSY; in bnx2x_prev_mcp_done()
10561 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot && in bnx2x_prev_path_get_entry()
10562 bp->pdev->bus->number == tmp_list->bus && in bnx2x_prev_path_get_entry()
10563 BP_PATH(bp) == tmp_list->path) in bnx2x_prev_path_get_entry()
10582 tmp_list->aer = 1; in bnx2x_prev_path_mark_eeh()
10604 if (tmp_list->aer) { in bnx2x_prev_is_path_marked()
10605 DP(NETIF_MSG_HW, "Path %d was marked by AER\n", in bnx2x_prev_is_path_marked()
10627 val = !!(entry && (entry->undi & (1 << BP_PORT(bp)))); in bnx2x_port_after_undi()
10648 if (!tmp_list->aer) { in bnx2x_prev_mark_path()
10649 BNX2X_ERR("Re-Marking the path.\n"); in bnx2x_prev_mark_path()
10651 DP(NETIF_MSG_HW, "Removing AER indication from path %d\n", in bnx2x_prev_mark_path()
10653 tmp_list->aer = 0; in bnx2x_prev_mark_path()
10664 return -ENOMEM; in bnx2x_prev_mark_path()
10667 tmp_list->bus = bp->pdev->bus->number; in bnx2x_prev_mark_path()
10668 tmp_list->slot = PCI_SLOT(bp->pdev->devfn); in bnx2x_prev_mark_path()
10669 tmp_list->path = BP_PATH(bp); in bnx2x_prev_mark_path()
10670 tmp_list->aer = 0; in bnx2x_prev_mark_path()
10671 tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0; in bnx2x_prev_mark_path()
10678 DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n", in bnx2x_prev_mark_path()
10680 list_add(&tmp_list->list, &bnx2x_prev_list); in bnx2x_prev_mark_path()
10689 struct pci_dev *dev = bp->pdev; in bnx2x_do_flr()
10693 return -EINVAL; in bnx2x_do_flr()
10697 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { in bnx2x_do_flr()
10699 bp->common.bc_ver); in bnx2x_do_flr()
10700 return -EINVAL; in bnx2x_do_flr()
10704 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n"); in bnx2x_do_flr()
10784 bnx2x_set_rx_filter(&bp->link_params, 0); in bnx2x_prev_unload_common()
10785 bp->link_params.port ^= 1; in bnx2x_prev_unload_common()
10786 bnx2x_set_rx_filter(&bp->link_params, 0); in bnx2x_prev_unload_common()
10787 bp->link_params.port ^= 1; in bnx2x_prev_unload_common()
10816 timer_count--; in bnx2x_prev_unload_common()
10868 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8); in bnx2x_prev_unload()
10894 rc = -EBUSY; in bnx2x_prev_unload()
10905 bnx2x_prev_path_get_entry(bp)->aer); in bnx2x_prev_unload()
10914 /* non-common reply from MCP might require looping */ in bnx2x_prev_unload()
10920 } while (--time_counter); in bnx2x_prev_unload()
10924 rc = -EPROBE_DEFER; in bnx2x_prev_unload()
10929 bp->link_params.feature_config_flags |= in bnx2x_prev_unload()
10943 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ in bnx2x_get_common_hwinfo()
10956 bp->common.chip_id = id; in bnx2x_get_common_hwinfo()
10961 bp->common.chip_id = (CHIP_NUM_57811 << 16) | in bnx2x_get_common_hwinfo()
10962 (bp->common.chip_id & 0x0000FFFF); in bnx2x_get_common_hwinfo()
10964 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) | in bnx2x_get_common_hwinfo()
10965 (bp->common.chip_id & 0x0000FFFF); in bnx2x_get_common_hwinfo()
10966 bp->common.chip_id |= 0x1; in bnx2x_get_common_hwinfo()
10970 bp->db_size = (1 << BNX2X_DB_SHIFT); in bnx2x_get_common_hwinfo()
10980 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE : in bnx2x_get_common_hwinfo()
10984 bp->pfid = (bp->pf_num >> 1); /* 0..3 */ in bnx2x_get_common_hwinfo()
10986 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */ in bnx2x_get_common_hwinfo()
10988 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */ in bnx2x_get_common_hwinfo()
10989 bp->pfid = bp->pf_num; /* 0..7 */ in bnx2x_get_common_hwinfo()
10992 BNX2X_DEV_INFO("pf_id: %x", bp->pfid); in bnx2x_get_common_hwinfo()
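/* The pfid mappings above work out as follows for pf_num 0..7:
 *
 *	4-port (pf_num >> 1):	0, 0, 1, 1, 2, 2, 3, 3
 *	2-port (pf_num & 0x6):	0, 0, 2, 2, 4, 4, 6, 6
 *	E1x    (pf_num):	0 .. 7
 */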
10994 bp->link_params.chip_id = bp->common.chip_id; in bnx2x_get_common_hwinfo()
10998 if ((bp->common.chip_id & 0x1) || in bnx2x_get_common_hwinfo()
11000 bp->flags |= ONE_PORT_FLAG; in bnx2x_get_common_hwinfo()
11005 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE << in bnx2x_get_common_hwinfo()
11008 bp->common.flash_size, bp->common.flash_size); in bnx2x_get_common_hwinfo()
11012 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ? in bnx2x_get_common_hwinfo()
11016 bp->link_params.shmem_base = bp->common.shmem_base; in bnx2x_get_common_hwinfo()
11017 bp->link_params.shmem2_base = bp->common.shmem2_base; in bnx2x_get_common_hwinfo()
11020 bp->link_params.lfa_base = in bnx2x_get_common_hwinfo()
11021 REG_RD(bp, bp->common.shmem2_base + in bnx2x_get_common_hwinfo()
11025 bp->link_params.lfa_base = 0; in bnx2x_get_common_hwinfo()
11027 bp->common.shmem_base, bp->common.shmem2_base); in bnx2x_get_common_hwinfo()
11029 if (!bp->common.shmem_base) { in bnx2x_get_common_hwinfo()
11031 bp->flags |= NO_MCP_FLAG; in bnx2x_get_common_hwinfo()
11035 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); in bnx2x_get_common_hwinfo()
11036 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config); in bnx2x_get_common_hwinfo()
11038 bp->link_params.hw_led_mode = ((bp->common.hw_config & in bnx2x_get_common_hwinfo()
11042 bp->link_params.feature_config_flags = 0; in bnx2x_get_common_hwinfo()
11045 bp->link_params.feature_config_flags |= in bnx2x_get_common_hwinfo()
11048 bp->link_params.feature_config_flags &= in bnx2x_get_common_hwinfo()
11052 bp->common.bc_ver = val; in bnx2x_get_common_hwinfo()
11060 bp->link_params.feature_config_flags |= in bnx2x_get_common_hwinfo()
11064 bp->link_params.feature_config_flags |= in bnx2x_get_common_hwinfo()
11067 bp->link_params.feature_config_flags |= in bnx2x_get_common_hwinfo()
11070 bp->link_params.feature_config_flags |= in bnx2x_get_common_hwinfo()
11074 bp->link_params.feature_config_flags |= in bnx2x_get_common_hwinfo()
11078 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ? in bnx2x_get_common_hwinfo()
11081 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ? in bnx2x_get_common_hwinfo()
11084 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? in bnx2x_get_common_hwinfo()
11087 bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ? in bnx2x_get_common_hwinfo()
11095 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE; in bnx2x_get_common_hwinfo()
11098 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI; in bnx2x_get_common_hwinfo()
11101 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE; in bnx2x_get_common_hwinfo()
11104 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE; in bnx2x_get_common_hwinfo()
11108 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc); in bnx2x_get_common_hwinfo()
11109 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; in bnx2x_get_common_hwinfo()
11112 (bp->flags & NO_WOL_FLAG) ? "not " : ""); in bnx2x_get_common_hwinfo()
11119 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n", in bnx2x_get_common_hwinfo()
11133 bp->igu_base_sb = 0xff; in bnx2x_get_igu_cam_info()
11136 igu_sb_cnt = bp->igu_sb_cnt; in bnx2x_get_igu_cam_info()
11137 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * in bnx2x_get_igu_cam_info()
11140 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x + in bnx2x_get_igu_cam_info()
11146 /* IGU in normal mode - read CAM */ in bnx2x_get_igu_cam_info()
11158 bp->igu_dsb_id = igu_sb_id; in bnx2x_get_igu_cam_info()
11160 if (bp->igu_base_sb == 0xff) in bnx2x_get_igu_cam_info()
11161 bp->igu_base_sb = igu_sb_id; in bnx2x_get_igu_cam_info()
11174 bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt); in bnx2x_get_igu_cam_info()
11179 return -EINVAL; in bnx2x_get_igu_cam_info()
11190 bp->port.supported[0] = 0; in bnx2x_link_settings_supported()
11191 bp->port.supported[1] = 0; in bnx2x_link_settings_supported()
11192 switch (bp->link_params.num_phys) { in bnx2x_link_settings_supported()
11194 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported; in bnx2x_link_settings_supported()
11198 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported; in bnx2x_link_settings_supported()
11202 if (bp->link_params.multi_phy_config & in bnx2x_link_settings_supported()
11204 bp->port.supported[1] = in bnx2x_link_settings_supported()
11205 bp->link_params.phy[EXT_PHY1].supported; in bnx2x_link_settings_supported()
11206 bp->port.supported[0] = in bnx2x_link_settings_supported()
11207 bp->link_params.phy[EXT_PHY2].supported; in bnx2x_link_settings_supported()
11209 bp->port.supported[0] = in bnx2x_link_settings_supported()
11210 bp->link_params.phy[EXT_PHY1].supported; in bnx2x_link_settings_supported()
11211 bp->port.supported[1] = in bnx2x_link_settings_supported()
11212 bp->link_params.phy[EXT_PHY2].supported; in bnx2x_link_settings_supported()
11218 if (!(bp->port.supported[0] || bp->port.supported[1])) { in bnx2x_link_settings_supported()
11219 BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n", in bnx2x_link_settings_supported()
11228 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR); in bnx2x_link_settings_supported()
11232 bp->port.phy_addr = REG_RD( in bnx2x_link_settings_supported()
11236 bp->port.phy_addr = REG_RD( in bnx2x_link_settings_supported()
11241 bp->port.link_config[0]); in bnx2x_link_settings_supported()
11245 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); in bnx2x_link_settings_supported()
11248 if (!(bp->link_params.speed_cap_mask[idx] & in bnx2x_link_settings_supported()
11250 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half; in bnx2x_link_settings_supported()
11252 if (!(bp->link_params.speed_cap_mask[idx] & in bnx2x_link_settings_supported()
11254 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full; in bnx2x_link_settings_supported()
11256 if (!(bp->link_params.speed_cap_mask[idx] & in bnx2x_link_settings_supported()
11258 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half; in bnx2x_link_settings_supported()
11260 if (!(bp->link_params.speed_cap_mask[idx] & in bnx2x_link_settings_supported()
11262 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full; in bnx2x_link_settings_supported()
11264 if (!(bp->link_params.speed_cap_mask[idx] & in bnx2x_link_settings_supported()
11266 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half | in bnx2x_link_settings_supported()
11269 if (!(bp->link_params.speed_cap_mask[idx] & in bnx2x_link_settings_supported()
11271 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full; in bnx2x_link_settings_supported()
11273 if (!(bp->link_params.speed_cap_mask[idx] & in bnx2x_link_settings_supported()
11275 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full; in bnx2x_link_settings_supported()
11277 if (!(bp->link_params.speed_cap_mask[idx] & in bnx2x_link_settings_supported()
11279 bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full; in bnx2x_link_settings_supported()
11282 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0], in bnx2x_link_settings_supported()
11283 bp->port.supported[1]); in bnx2x_link_settings_supported()
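/*
 * Illustrative sketch (not from the driver): the loop above prunes each
 * port.supported[] bitmap so that only speeds permitted by that PHY's
 * speed_cap_mask remain advertised. Only a few of the speed bits are shown,
 * and the flag values below are hypothetical stand-ins for the SUPPORTED_*
 * and PORT_HW_CFG_SPEED_CAPABILITY_* macros.
 */
#include <stdint.h>
#include <stdio.h>

#define SUP_10_FULL    (1u << 0)
#define SUP_100_FULL   (1u << 1)
#define SUP_1000_FULL  (1u << 2)
#define SUP_10000_FULL (1u << 3)

#define CAP_10_FULL    (1u << 0)
#define CAP_100_FULL   (1u << 1)
#define CAP_1000_FULL  (1u << 2)
#define CAP_10000_FULL (1u << 3)

static uint32_t prune_supported(uint32_t supported, uint32_t cap_mask)
{
	if (!(cap_mask & CAP_10_FULL))
		supported &= ~SUP_10_FULL;
	if (!(cap_mask & CAP_100_FULL))
		supported &= ~SUP_100_FULL;
	if (!(cap_mask & CAP_1000_FULL))
		supported &= ~SUP_1000_FULL;
	if (!(cap_mask & CAP_10000_FULL))
		supported &= ~SUP_10000_FULL;
	return supported;
}

int main(void)
{
	/* PHY claims 1G and 10G, but the NVRAM cap mask only allows 1G. */
	uint32_t sup = prune_supported(SUP_1000_FULL | SUP_10000_FULL,
				       CAP_1000_FULL);

	printf("supported after pruning: 0x%x\n", (unsigned)sup); /* 0x4 */
	return 0;
}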
11289 bp->port.advertising[0] = 0; in bnx2x_link_settings_requested()
11290 bp->port.advertising[1] = 0; in bnx2x_link_settings_requested()
11291 switch (bp->link_params.num_phys) { in bnx2x_link_settings_requested()
11301 bp->link_params.req_duplex[idx] = DUPLEX_FULL; in bnx2x_link_settings_requested()
11302 link_config = bp->port.link_config[idx]; in bnx2x_link_settings_requested()
11305 if (bp->port.supported[idx] & SUPPORTED_Autoneg) { in bnx2x_link_settings_requested()
11306 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11308 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11309 bp->port.supported[idx]; in bnx2x_link_settings_requested()
11310 if (bp->link_params.phy[EXT_PHY1].type == in bnx2x_link_settings_requested()
11312 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11317 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11319 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11327 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) { in bnx2x_link_settings_requested()
11328 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11330 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11336 bp->link_params.speed_cap_mask[idx]); in bnx2x_link_settings_requested()
11342 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) { in bnx2x_link_settings_requested()
11343 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11345 bp->link_params.req_duplex[idx] = in bnx2x_link_settings_requested()
11347 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11353 bp->link_params.speed_cap_mask[idx]); in bnx2x_link_settings_requested()
11359 if (bp->port.supported[idx] & in bnx2x_link_settings_requested()
11361 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11363 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11369 bp->link_params.speed_cap_mask[idx]); in bnx2x_link_settings_requested()
11375 if (bp->port.supported[idx] & in bnx2x_link_settings_requested()
11377 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11379 bp->link_params.req_duplex[idx] = in bnx2x_link_settings_requested()
11381 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11387 bp->link_params.speed_cap_mask[idx]); in bnx2x_link_settings_requested()
11393 if (bp->port.supported[idx] & in bnx2x_link_settings_requested()
11395 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11397 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11400 } else if (bp->port.supported[idx] & in bnx2x_link_settings_requested()
11402 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11404 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11409 bp->link_params.speed_cap_mask[idx]); in bnx2x_link_settings_requested()
11415 if (bp->port.supported[idx] & in bnx2x_link_settings_requested()
11417 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11419 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11425 bp->link_params.speed_cap_mask[idx]); in bnx2x_link_settings_requested()
11431 if (bp->port.supported[idx] & in bnx2x_link_settings_requested()
11433 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11435 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11438 } else if (bp->port.supported[idx] & in bnx2x_link_settings_requested()
11440 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11442 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11448 bp->link_params.speed_cap_mask[idx]); in bnx2x_link_settings_requested()
11453 bp->link_params.req_line_speed[idx] = SPEED_20000; in bnx2x_link_settings_requested()
11459 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11461 bp->port.advertising[idx] = in bnx2x_link_settings_requested()
11462 bp->port.supported[idx]; in bnx2x_link_settings_requested()
11466 bp->link_params.req_flow_ctrl[idx] = (link_config & in bnx2x_link_settings_requested()
11468 if (bp->link_params.req_flow_ctrl[idx] == in bnx2x_link_settings_requested()
11470 if (!(bp->port.supported[idx] & SUPPORTED_Autoneg)) in bnx2x_link_settings_requested()
11471 bp->link_params.req_flow_ctrl[idx] = in bnx2x_link_settings_requested()
11478 bp->link_params.req_line_speed[idx], in bnx2x_link_settings_requested()
11479 bp->link_params.req_duplex[idx], in bnx2x_link_settings_requested()
11480 bp->link_params.req_flow_ctrl[idx], in bnx2x_link_settings_requested()
11481 bp->port.advertising[idx]); in bnx2x_link_settings_requested()
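/*
 * Illustrative sketch (not from the driver): the block above resolves the
 * requested flow control read from NVRAM. When "auto" is requested but the
 * selected PHY cannot autonegotiate, the request is replaced with a fixed
 * setting; the sketch downgrades to "none", which is an assumption since the
 * assigned value is cut off in the listing above. Enum names are hypothetical
 * stand-ins for the BNX2X_FLOW_CTRL_* values.
 */
#include <stdbool.h>
#include <stdio.h>

enum flow_ctrl { FC_AUTO, FC_NONE, FC_RX, FC_TX, FC_BOTH };

static enum flow_ctrl resolve_flow_ctrl(enum flow_ctrl requested,
					bool autoneg_supported)
{
	if (requested == FC_AUTO && !autoneg_supported)
		return FC_NONE;	/* cannot negotiate, so disable flow control */
	return requested;
}

int main(void)
{
	printf("%d\n", resolve_flow_ctrl(FC_AUTO, false)); /* 1 (FC_NONE) */
	printf("%d\n", resolve_flow_ctrl(FC_AUTO, true));  /* 0 (FC_AUTO) */
	return 0;
}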
11499 bp->link_params.bp = bp; in bnx2x_get_port_hwinfo()
11500 bp->link_params.port = port; in bnx2x_get_port_hwinfo()
11502 bp->link_params.lane_config = in bnx2x_get_port_hwinfo()
11505 bp->link_params.speed_cap_mask[0] = in bnx2x_get_port_hwinfo()
11509 bp->link_params.speed_cap_mask[1] = in bnx2x_get_port_hwinfo()
11513 bp->port.link_config[0] = in bnx2x_get_port_hwinfo()
11516 bp->port.link_config[1] = in bnx2x_get_port_hwinfo()
11519 bp->link_params.multi_phy_config = in bnx2x_get_port_hwinfo()
11525 bp->wol = (!(bp->flags & NO_WOL_FLAG) && in bnx2x_get_port_hwinfo()
11530 bp->flags |= NO_ISCSI_FLAG; in bnx2x_get_port_hwinfo()
11533 bp->flags |= NO_FCOE_FLAG; in bnx2x_get_port_hwinfo()
11536 bp->link_params.lane_config, in bnx2x_get_port_hwinfo()
11537 bp->link_params.speed_cap_mask[0], in bnx2x_get_port_hwinfo()
11538 bp->port.link_config[0]); in bnx2x_get_port_hwinfo()
11540 bp->link_params.switch_cfg = (bp->port.link_config[0] & in bnx2x_get_port_hwinfo()
11542 bnx2x_phy_probe(&bp->link_params); in bnx2x_get_port_hwinfo()
11543 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); in bnx2x_get_port_hwinfo()
11556 bp->mdio.prtad = bp->port.phy_addr; in bnx2x_get_port_hwinfo()
11560 bp->mdio.prtad = in bnx2x_get_port_hwinfo()
11569 bp->link_params.eee_mode = EEE_MODE_ADV_LPI | in bnx2x_get_port_hwinfo()
11573 bp->link_params.eee_mode = 0; in bnx2x_get_port_hwinfo()
11585 bp->flags |= no_flags; in bnx2x_get_iscsi_info()
11590 bp->cnic_eth_dev.max_iscsi_conn = in bnx2x_get_iscsi_info()
11595 bp->cnic_eth_dev.max_iscsi_conn); in bnx2x_get_iscsi_info()
11598 * If the maximum allowed number of connections is zero - in bnx2x_get_iscsi_info()
11601 if (!bp->cnic_eth_dev.max_iscsi_conn) in bnx2x_get_iscsi_info()
11602 bp->flags |= no_flags; in bnx2x_get_iscsi_info()
11608 bp->cnic_eth_dev.fcoe_wwn_port_name_hi = in bnx2x_get_ext_wwn_info()
11610 bp->cnic_eth_dev.fcoe_wwn_port_name_lo = in bnx2x_get_ext_wwn_info()
11614 bp->cnic_eth_dev.fcoe_wwn_node_name_hi = in bnx2x_get_ext_wwn_info()
11616 bp->cnic_eth_dev.fcoe_wwn_node_name_lo = in bnx2x_get_ext_wwn_info()
11671 bp->flags |= NO_FCOE_FLAG; in bnx2x_get_fcoe_info()
11676 bp->cnic_eth_dev.max_fcoe_conn = in bnx2x_get_fcoe_info()
11681 bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE; in bnx2x_get_fcoe_info()
11685 bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func; in bnx2x_get_fcoe_info()
11690 bp->cnic_eth_dev.fcoe_wwn_port_name_hi = in bnx2x_get_fcoe_info()
11694 bp->cnic_eth_dev.fcoe_wwn_port_name_lo = in bnx2x_get_fcoe_info()
11700 bp->cnic_eth_dev.fcoe_wwn_node_name_hi = in bnx2x_get_fcoe_info()
11704 bp->cnic_eth_dev.fcoe_wwn_node_name_lo = in bnx2x_get_fcoe_info()
11719 BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn); in bnx2x_get_fcoe_info()
11722 * If the maximum allowed number of connections is zero - in bnx2x_get_fcoe_info()
11725 if (!bp->cnic_eth_dev.max_fcoe_conn) { in bnx2x_get_fcoe_info()
11726 bp->flags |= NO_FCOE_FLAG; in bnx2x_get_fcoe_info()
11727 eth_zero_addr(bp->fip_mac); in bnx2x_get_fcoe_info()
11747 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac; in bnx2x_get_cnic_mac_hwinfo()
11748 u8 *fip_mac = bp->fip_mac; in bnx2x_get_cnic_mac_hwinfo()
11767 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; in bnx2x_get_cnic_mac_hwinfo()
11779 bp->flags |= NO_FCOE_FLAG; in bnx2x_get_cnic_mac_hwinfo()
11782 bp->mf_ext_config = cfg; in bnx2x_get_cnic_mac_hwinfo()
11787 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN); in bnx2x_get_cnic_mac_hwinfo()
11794 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN); in bnx2x_get_cnic_mac_hwinfo()
11801 /* If this is a storage-only interface, use SAN mac as in bnx2x_get_cnic_mac_hwinfo()
11806 eth_hw_addr_set(bp->dev, fip_mac); in bnx2x_get_cnic_mac_hwinfo()
11823 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; in bnx2x_get_cnic_mac_hwinfo()
11829 bp->flags |= NO_FCOE_FLAG; in bnx2x_get_cnic_mac_hwinfo()
11830 eth_zero_addr(bp->fip_mac); in bnx2x_get_cnic_mac_hwinfo()
11842 eth_hw_addr_set(bp->dev, addr); in bnx2x_get_mac_hwinfo()
11846 eth_hw_addr_random(bp->dev); in bnx2x_get_mac_hwinfo()
11853 eth_hw_addr_set(bp->dev, addr); in bnx2x_get_mac_hwinfo()
11863 eth_hw_addr_set(bp->dev, addr); in bnx2x_get_mac_hwinfo()
11873 bnx2x_set_mac_buf(bp->phys_port_id, val, val2); in bnx2x_get_mac_hwinfo()
11874 bp->flags |= HAS_PHYS_PORT_ID; in bnx2x_get_mac_hwinfo()
11877 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); in bnx2x_get_mac_hwinfo()
11879 if (!is_valid_ether_addr(bp->dev->dev_addr)) in bnx2x_get_mac_hwinfo()
11880 dev_err(&bp->pdev->dev, in bnx2x_get_mac_hwinfo()
11883 bp->dev->dev_addr); in bnx2x_get_mac_hwinfo()
11918 bp->mf_mode = MULTI_FUNCTION_SI; in validate_set_si_mode()
11919 bp->mf_config[BP_VN(bp)] = in validate_set_si_mode()
11934 dev_err(&bp->pdev->dev, in bnx2x_get_hwinfo()
11936 return -EINVAL; in bnx2x_get_hwinfo()
11945 bp->common.int_block = INT_BLOCK_HC; in bnx2x_get_hwinfo()
11947 bp->igu_dsb_id = DEF_SB_IGU_ID; in bnx2x_get_hwinfo()
11948 bp->igu_base_sb = 0; in bnx2x_get_hwinfo()
11950 bp->common.int_block = INT_BLOCK_IGU; in bnx2x_get_hwinfo()
11967 tout--; in bnx2x_get_hwinfo()
11972 dev_err(&bp->pdev->dev, in bnx2x_get_hwinfo()
11976 return -EPERM; in bnx2x_get_hwinfo()
11982 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP; in bnx2x_get_hwinfo()
11993 * set base FW non-default (fast path) status block id, this value is in bnx2x_get_hwinfo()
11998 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp); in bnx2x_get_hwinfo()
12000 * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of in bnx2x_get_hwinfo()
12004 bp->base_fw_ndsb = bp->igu_base_sb; in bnx2x_get_hwinfo()
12007 "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb, in bnx2x_get_hwinfo()
12008 bp->igu_sb_cnt, bp->base_fw_ndsb); in bnx2x_get_hwinfo()
12013 bp->mf_ov = 0; in bnx2x_get_hwinfo()
12014 bp->mf_mode = 0; in bnx2x_get_hwinfo()
12015 bp->mf_sub_mode = 0; in bnx2x_get_hwinfo()
12020 bp->common.shmem2_base, SHMEM2_RD(bp, size), in bnx2x_get_hwinfo()
12024 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr); in bnx2x_get_hwinfo()
12026 bp->common.mf_cfg_base = bp->common.shmem_base + in bnx2x_get_hwinfo()
12033 * for Switch-Independent mode; in bnx2x_get_hwinfo()
12034 * OVLAN must be legal for Switch-Dependent mode in bnx2x_get_hwinfo()
12037 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { in bnx2x_get_hwinfo()
12053 bp->mf_mode = MULTI_FUNCTION_AFEX; in bnx2x_get_hwinfo()
12054 bp->mf_config[vn] = MF_CFG_RD(bp, in bnx2x_get_hwinfo()
12067 bp->mf_mode = MULTI_FUNCTION_SD; in bnx2x_get_hwinfo()
12068 bp->mf_config[vn] = MF_CFG_RD(bp, in bnx2x_get_hwinfo()
12074 bp->mf_mode = MULTI_FUNCTION_SD; in bnx2x_get_hwinfo()
12075 bp->mf_sub_mode = SUB_MF_MODE_BD; in bnx2x_get_hwinfo()
12076 bp->mf_config[vn] = in bnx2x_get_hwinfo()
12087 DP(NETIF_MSG_IFUP, "Read MTU size %04x [%08x]\n", in bnx2x_get_hwinfo()
12094 bp->dev->mtu = mtu_size; in bnx2x_get_hwinfo()
12098 bp->mf_mode = MULTI_FUNCTION_SD; in bnx2x_get_hwinfo()
12099 bp->mf_sub_mode = SUB_MF_MODE_UFP; in bnx2x_get_hwinfo()
12100 bp->mf_config[vn] = in bnx2x_get_hwinfo()
12105 bp->mf_config[vn] = 0; in bnx2x_get_hwinfo()
12114 bp->mf_sub_mode = in bnx2x_get_hwinfo()
12119 bp->mf_config[vn] = 0; in bnx2x_get_hwinfo()
12126 bp->mf_config[vn] = 0; in bnx2x_get_hwinfo()
12134 switch (bp->mf_mode) { in bnx2x_get_hwinfo()
12139 bp->mf_ov = val; in bnx2x_get_hwinfo()
12140 bp->path_has_ovlan = true; in bnx2x_get_hwinfo()
12143 func, bp->mf_ov, bp->mf_ov); in bnx2x_get_hwinfo()
12144 } else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) || in bnx2x_get_hwinfo()
12145 (bp->mf_sub_mode == SUB_MF_MODE_BD)) { in bnx2x_get_hwinfo()
12146 dev_err(&bp->pdev->dev, in bnx2x_get_hwinfo()
12147 "Unexpected - no valid MF OV for func %d in UFP/BD mode\n", in bnx2x_get_hwinfo()
12149 bp->path_has_ovlan = true; in bnx2x_get_hwinfo()
12151 dev_err(&bp->pdev->dev, in bnx2x_get_hwinfo()
12154 return -EPERM; in bnx2x_get_hwinfo()
12161 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n", in bnx2x_get_hwinfo()
12166 dev_err(&bp->pdev->dev, in bnx2x_get_hwinfo()
12169 return -EPERM; in bnx2x_get_hwinfo()
12180 !bp->path_has_ovlan && in bnx2x_get_hwinfo()
12182 bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { in bnx2x_get_hwinfo()
12188 bp->path_has_ovlan = true; in bnx2x_get_hwinfo()
12194 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT); in bnx2x_get_hwinfo()
12214 memset(bp->fw_ver, 0, sizeof(bp->fw_ver)); in bnx2x_read_fwinfo()
12216 vpd_data = pci_vpd_alloc(bp->pdev, &vpd_len); in bnx2x_read_fwinfo()
12231 if (rodi >= 0 && kw_len < sizeof(bp->fw_ver)) { in bnx2x_read_fwinfo()
12232 memcpy(bp->fw_ver, &vpd_data[rodi], kw_len); in bnx2x_read_fwinfo()
12233 bp->fw_ver[kw_len] = ' '; in bnx2x_read_fwinfo()
12268 switch (bp->mf_mode) { in bnx2x_set_modes_bitmap()
12295 mutex_init(&bp->port.phy_mutex); in bnx2x_init_bp()
12296 mutex_init(&bp->fw_mb_mutex); in bnx2x_init_bp()
12297 mutex_init(&bp->drv_info_mutex); in bnx2x_init_bp()
12298 sema_init(&bp->stats_lock, 1); in bnx2x_init_bp()
12299 bp->drv_info_mng_owner = false; in bnx2x_init_bp()
12300 INIT_LIST_HEAD(&bp->vlan_reg); in bnx2x_init_bp()
12302 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); in bnx2x_init_bp()
12303 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); in bnx2x_init_bp()
12304 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); in bnx2x_init_bp()
12305 INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task); in bnx2x_init_bp()
12313 eth_hw_addr_set(bp->dev, zero_addr); in bnx2x_init_bp()
12329 bp->fw_seq = in bnx2x_init_bp()
12332 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); in bnx2x_init_bp()
12342 dev_err(&bp->pdev->dev, "FPGA detected\n"); in bnx2x_init_bp()
12345 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n"); in bnx2x_init_bp()
12347 bp->disable_tpa = disable_tpa; in bnx2x_init_bp()
12348 bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp); in bnx2x_init_bp()
12350 bp->disable_tpa |= is_kdump_kernel(); in bnx2x_init_bp()
12353 if (bp->disable_tpa) { in bnx2x_init_bp()
12354 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); in bnx2x_init_bp()
12355 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); in bnx2x_init_bp()
12359 bp->dropless_fc = false; in bnx2x_init_bp()
12361 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp); in bnx2x_init_bp()
12363 bp->mrrs = mrrs; in bnx2x_init_bp()
12365 bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL; in bnx2x_init_bp()
12367 bp->rx_ring_size = MAX_RX_AVAIL; in bnx2x_init_bp()
12370 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR; in bnx2x_init_bp()
12371 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR; in bnx2x_init_bp()
12373 bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ; in bnx2x_init_bp()
12375 timer_setup(&bp->timer, bnx2x_timer, 0); in bnx2x_init_bp()
12376 bp->timer.expires = jiffies + bp->current_interval; in bnx2x_init_bp()
12391 bp->cnic_base_cl_id = FP_SB_MAX_E1x; in bnx2x_init_bp()
12393 bp->cnic_base_cl_id = FP_SB_MAX_E2; in bnx2x_init_bp()
12397 bp->max_cos = 1; in bnx2x_init_bp()
12399 bp->max_cos = BNX2X_MULTI_TX_COS_E1X; in bnx2x_init_bp()
12401 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0; in bnx2x_init_bp()
12403 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0; in bnx2x_init_bp()
12407 BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos); in bnx2x_init_bp()
12409 /* We need at least one default status block for slow-path events, in bnx2x_init_bp()
12414 bp->min_msix_vec_cnt = 1; in bnx2x_init_bp()
12416 bp->min_msix_vec_cnt = 3; in bnx2x_init_bp()
12418 bp->min_msix_vec_cnt = 2; in bnx2x_init_bp()
12419 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt); in bnx2x_init_bp()
12421 bp->dump_preset_idx = 1; in bnx2x_init_bp()
12440 bp->stats_init = true; in bnx2x_open()
12479 netdev_info(bp->dev, in bnx2x_open()
12486 bp->recovery_state = BNX2X_RECOVERY_FAILED; in bnx2x_open()
12491 return -EAGAIN; in bnx2x_open()
12496 bp->recovery_state = BNX2X_RECOVERY_DONE; in bnx2x_open()
12522 ((PAGE_SIZE - sizeof(struct bnx2x_mcast_list_elem_group)) / \
12533 list_del(&current_mcast_group->mcast_group_link); in bnx2x_free_mcast_macs_list()
12545 int mc_count = netdev_mc_count(bp->dev); in bnx2x_init_mcast_macs_list()
12548 INIT_LIST_HEAD(&p->mcast_list); in bnx2x_init_mcast_macs_list()
12549 netdev_for_each_mc_addr(ha, bp->dev) { in bnx2x_init_mcast_macs_list()
12557 return -ENOMEM; in bnx2x_init_mcast_macs_list()
12559 list_add(&current_mcast_group->mcast_group_link, in bnx2x_init_mcast_macs_list()
12562 mc_mac = &current_mcast_group->mcast_elems[offset]; in bnx2x_init_mcast_macs_list()
12563 mc_mac->mac = bnx2x_mc_addr(ha); in bnx2x_init_mcast_macs_list()
12564 list_add_tail(&mc_mac->link, &p->mcast_list); in bnx2x_init_mcast_macs_list()
12569 p->mcast_list_len = mc_count; in bnx2x_init_mcast_macs_list()
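/*
 * Illustrative sketch (not from the driver): the macro above sizes how many
 * multicast list elements fit in one page after the group header, and
 * bnx2x_init_mcast_macs_list() above appears to allocate a fresh page-sized
 * group whenever the running offset wraps. Struct layouts and the page size
 * below are hypothetical stand-ins.
 */
#include <stddef.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096u

struct sketch_elem { const unsigned char *mac; void *link[2]; };
struct sketch_group_hdr { void *group_link[2]; };

#define ELEMS_PER_PG \
	((SKETCH_PAGE_SIZE - sizeof(struct sketch_group_hdr)) / \
	 sizeof(struct sketch_elem))

int main(void)
{
	unsigned int mc_count = 300;
	unsigned int groups = (mc_count + ELEMS_PER_PG - 1) / ELEMS_PER_PG;

	printf("%zu elems/page -> %u page-sized groups for %u addresses\n",
	       ELEMS_PER_PG, groups, mc_count);
	return 0;
}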
12574 * bnx2x_set_uc_list - configure a new unicast MACs list.
12583 struct net_device *dev = bp->dev; in bnx2x_set_uc_list()
12585 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj; in bnx2x_set_uc_list()
12598 if (rc == -EEXIST) { in bnx2x_set_uc_list()
12599 DP(BNX2X_MSG_SP, in bnx2x_set_uc_list()
12621 struct net_device *dev = bp->dev; in bnx2x_set_mc_list_e1x()
12625 rparam.mcast_obj = &bp->mcast_obj; in bnx2x_set_mc_list_e1x()
12657 struct net_device *dev = bp->dev; in bnx2x_set_mc_list()
12660 /* On older adapters, we need to flush and re-add filters */ in bnx2x_set_mc_list()
12664 rparam.mcast_obj = &bp->mcast_obj; in bnx2x_set_mc_list()
12690 /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
12695 if (bp->state != BNX2X_STATE_OPEN) { in bnx2x_set_rx_mode()
12696 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); in bnx2x_set_rx_mode()
12709 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags); in bnx2x_set_rx_mode_inner()
12711 netif_addr_lock_bh(bp->dev); in bnx2x_set_rx_mode_inner()
12713 if (bp->dev->flags & IFF_PROMISC) { in bnx2x_set_rx_mode_inner()
12715 } else if ((bp->dev->flags & IFF_ALLMULTI) || in bnx2x_set_rx_mode_inner()
12716 ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) && in bnx2x_set_rx_mode_inner()
12726 netif_addr_unlock_bh(bp->dev); in bnx2x_set_rx_mode_inner()
12729 netif_addr_lock_bh(bp->dev); in bnx2x_set_rx_mode_inner()
12739 bp->rx_mode = rx_mode; in bnx2x_set_rx_mode_inner()
12742 bp->rx_mode = BNX2X_RX_MODE_NONE; in bnx2x_set_rx_mode_inner()
12745 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) { in bnx2x_set_rx_mode_inner()
12746 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); in bnx2x_set_rx_mode_inner()
12747 netif_addr_unlock_bh(bp->dev); in bnx2x_set_rx_mode_inner()
12753 netif_addr_unlock_bh(bp->dev); in bnx2x_set_rx_mode_inner()
12756 * the VF needs to release the bottom-half lock prior to the in bnx2x_set_rx_mode_inner()
12759 netif_addr_unlock_bh(bp->dev); in bnx2x_set_rx_mode_inner()
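/*
 * Illustrative sketch (not from the driver): the logic above picks the RX
 * mode from the netdev flags - promiscuous wins, otherwise "all multicast"
 * is used when either IFF_ALLMULTI is set or the multicast list is larger
 * than the hardware can filter. The extra chip-revision check ANDed into the
 * second condition in the listing is omitted here, and the flag and limit
 * names are hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

enum rx_mode { RX_MODE_NORMAL, RX_MODE_ALLMULTI, RX_MODE_PROMISC };

#define MAX_MC_FILTERS 64

static enum rx_mode pick_rx_mode(bool promisc, bool allmulti,
				 unsigned int mc_count)
{
	if (promisc)
		return RX_MODE_PROMISC;
	if (allmulti || mc_count > MAX_MC_FILTERS)
		return RX_MODE_ALLMULTI;
	return RX_MODE_NORMAL;
}

int main(void)
{
	/* too many multicast addresses to filter exactly */
	printf("%d\n", pick_rx_mode(false, false, 200)); /* 1 (ALLMULTI) */
	return 0;
}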
12772 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n", in bnx2x_mdio_read()
12779 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value); in bnx2x_mdio_read()
12781 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc); in bnx2x_mdio_read()
12795 DP(NETIF_MSG_LINK, in bnx2x_mdio_write()
12803 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value); in bnx2x_mdio_write()
12815 return -EAGAIN; in bnx2x_ioctl()
12821 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n", in bnx2x_ioctl()
12822 mdio->phy_id, mdio->reg_num, mdio->val_in); in bnx2x_ioctl()
12823 return mdio_mii_ioctl(&bp->mdio, mdio, cmd); in bnx2x_ioctl()
12835 if (!is_valid_ether_addr(dev->dev_addr)) { in bnx2x_validate_addr()
12836 BNX2X_ERR("Invalid Ethernet address\n"); in bnx2x_validate_addr()
12837 return -EADDRNOTAVAIL; in bnx2x_validate_addr()
12847 if (!(bp->flags & HAS_PHYS_PORT_ID)) in bnx2x_get_phys_port_id()
12848 return -EOPNOTSUPP; in bnx2x_get_phys_port_id()
12850 ppid->id_len = sizeof(bp->phys_port_id); in bnx2x_get_phys_port_id()
12851 memcpy(ppid->id, bp->phys_port_id, ppid->id_len); in bnx2x_get_phys_port_id()
12874 (skb_shinfo(skb)->gso_size > 9000) && in bnx2x_features_check()
12890 rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj, in __bnx2x_vlan_configure_vid()
12893 rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add); in __bnx2x_vlan_configure_vid()
12904 /* Configure all non-configured entries */ in bnx2x_vlan_configure_vid_list()
12905 list_for_each_entry(vlan, &bp->vlan_reg, link) { in bnx2x_vlan_configure_vid_list()
12906 if (vlan->hw) in bnx2x_vlan_configure_vid_list()
12909 if (bp->vlan_cnt >= bp->vlan_credit) in bnx2x_vlan_configure_vid_list()
12910 return -ENOBUFS; in bnx2x_vlan_configure_vid_list()
12912 rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true); in bnx2x_vlan_configure_vid_list()
12914 BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid); in bnx2x_vlan_configure_vid_list()
12918 DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid); in bnx2x_vlan_configure_vid_list()
12919 vlan->hw = true; in bnx2x_vlan_configure_vid_list()
12920 bp->vlan_cnt++; in bnx2x_vlan_configure_vid_list()
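/*
 * Illustrative sketch (not from the driver): the loop above walks the
 * software VLAN list and programs only entries not yet in hardware, stopping
 * with -ENOBUFS once the per-function filter credit is exhausted. Types and
 * the credit value below are hypothetical stand-ins.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct sketch_vlan { unsigned short vid; bool hw; };

static int configure_vlans(struct sketch_vlan *v, int n, int credit, int *cnt)
{
	for (int i = 0; i < n; i++) {
		if (v[i].hw)
			continue;		/* already programmed */
		if (*cnt >= credit)
			return -ENOBUFS;	/* out of hardware filter credit */
		v[i].hw = true;			/* pretend the ramrod succeeded */
		(*cnt)++;
	}
	return 0;
}

int main(void)
{
	struct sketch_vlan v[] = { { 10, false }, { 20, true }, { 30, false } };
	int cnt = 1;	/* one entry (vid 20) already consumes credit */

	printf("rc=%d cnt=%d\n", configure_vlans(v, 3, 2, &cnt), cnt);
	return 0;
}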
12932 if (bp->accept_any_vlan != need_accept_any_vlan) { in bnx2x_vlan_configure()
12933 bp->accept_any_vlan = need_accept_any_vlan; in bnx2x_vlan_configure()
12934 DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n", in bnx2x_vlan_configure()
12935 bp->accept_any_vlan ? "raised" : "cleared"); in bnx2x_vlan_configure()
12958 DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid); in bnx2x_vlan_rx_add_vid()
12962 return -ENOMEM; in bnx2x_vlan_rx_add_vid()
12964 vlan->vid = vid; in bnx2x_vlan_rx_add_vid()
12965 vlan->hw = false; in bnx2x_vlan_rx_add_vid()
12966 list_add_tail(&vlan->link, &bp->vlan_reg); in bnx2x_vlan_rx_add_vid()
12981 DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid); in bnx2x_vlan_rx_kill_vid()
12983 list_for_each_entry(vlan, &bp->vlan_reg, link) in bnx2x_vlan_rx_kill_vid()
12984 if (vlan->vid == vid) { in bnx2x_vlan_rx_kill_vid()
12990 BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid); in bnx2x_vlan_rx_kill_vid()
12991 return -EINVAL; in bnx2x_vlan_rx_kill_vid()
12994 if (netif_running(dev) && vlan->hw) { in bnx2x_vlan_rx_kill_vid()
12996 DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid); in bnx2x_vlan_rx_kill_vid()
12997 bp->vlan_cnt--; in bnx2x_vlan_rx_kill_vid()
13000 list_del(&vlan->link); in bnx2x_vlan_rx_kill_vid()
13006 DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc); in bnx2x_vlan_rx_kill_vid()
13051 SET_NETDEV_DEV(dev, &pdev->dev); in bnx2x_init_dev()
13053 bp->dev = dev; in bnx2x_init_dev()
13054 bp->pdev = pdev; in bnx2x_init_dev()
13058 dev_err(&bp->pdev->dev, in bnx2x_init_dev()
13064 dev_err(&bp->pdev->dev, in bnx2x_init_dev()
13066 rc = -ENODEV; in bnx2x_init_dev()
13071 dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n"); in bnx2x_init_dev()
13072 rc = -ENODEV; in bnx2x_init_dev()
13080 rc = -ENODEV; in bnx2x_init_dev()
13084 if (atomic_read(&pdev->enable_cnt) == 1) { in bnx2x_init_dev()
13087 dev_err(&bp->pdev->dev, in bnx2x_init_dev()
13097 if (!pdev->pm_cap) { in bnx2x_init_dev()
13098 dev_err(&bp->pdev->dev, in bnx2x_init_dev()
13100 rc = -EIO; in bnx2x_init_dev()
13106 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n"); in bnx2x_init_dev()
13107 rc = -EIO; in bnx2x_init_dev()
13111 rc = dma_set_mask_and_coherent(&bp->pdev->dev, DMA_BIT_MASK(64)); in bnx2x_init_dev()
13113 dev_err(&bp->pdev->dev, "System does not support DMA, aborting\n"); in bnx2x_init_dev()
13117 dev->mem_start = pci_resource_start(pdev, 0); in bnx2x_init_dev()
13118 dev->base_addr = dev->mem_start; in bnx2x_init_dev()
13119 dev->mem_end = pci_resource_end(pdev, 0); in bnx2x_init_dev()
13121 dev->irq = pdev->irq; in bnx2x_init_dev()
13123 bp->regview = pci_ioremap_bar(pdev, 0); in bnx2x_init_dev()
13124 if (!bp->regview) { in bnx2x_init_dev()
13125 dev_err(&bp->pdev->dev, in bnx2x_init_dev()
13127 rc = -ENOMEM; in bnx2x_init_dev()
13137 bp->pf_num = PCI_FUNC(pdev->devfn); in bnx2x_init_dev()
13140 pci_read_config_dword(bp->pdev, in bnx2x_init_dev()
13142 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >> in bnx2x_init_dev()
13145 BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num); in bnx2x_init_dev()
13148 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, in bnx2x_init_dev()
13152 pdev->needs_freset = 1; in bnx2x_init_dev()
13171 /* Enable internal target-read (in case we are probed after PF in bnx2x_init_dev()
13180 dev->watchdog_timeo = TX_TIMEOUT; in bnx2x_init_dev()
13182 dev->netdev_ops = &bnx2x_netdev_ops; in bnx2x_init_dev()
13185 dev->priv_flags |= IFF_UNICAST_FLT; in bnx2x_init_dev()
13187 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | in bnx2x_init_dev()
13192 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | in bnx2x_init_dev()
13198 dev->hw_enc_features = in bnx2x_init_dev()
13206 dev->gso_partial_features = NETIF_F_GSO_GRE_CSUM | in bnx2x_init_dev()
13210 dev->udp_tunnel_nic_info = &bnx2x_udp_tunnels; in bnx2x_init_dev()
13213 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | in bnx2x_init_dev()
13218 bp->accept_any_vlan = true; in bnx2x_init_dev()
13220 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; in bnx2x_init_dev()
13226 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX; in bnx2x_init_dev()
13227 dev->features |= NETIF_F_HIGHDMA; in bnx2x_init_dev()
13228 if (dev->features & NETIF_F_LRO) in bnx2x_init_dev()
13229 dev->features &= ~NETIF_F_GRO_HW; in bnx2x_init_dev()
13232 dev->hw_features |= NETIF_F_LOOPBACK; in bnx2x_init_dev()
13235 dev->dcbnl_ops = &bnx2x_dcbnl_ops; in bnx2x_init_dev()
13238 /* MTU range, 46 - 9600 */ in bnx2x_init_dev()
13239 dev->min_mtu = ETH_MIN_PACKET_SIZE; in bnx2x_init_dev()
13240 dev->max_mtu = ETH_MAX_JUMBO_PACKET_SIZE; in bnx2x_init_dev()
13243 bp->mdio.prtad = MDIO_PRTAD_NONE; in bnx2x_init_dev()
13244 bp->mdio.mmds = 0; in bnx2x_init_dev()
13245 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; in bnx2x_init_dev()
13246 bp->mdio.dev = dev; in bnx2x_init_dev()
13247 bp->mdio.mdio_read = bnx2x_mdio_read; in bnx2x_init_dev()
13248 bp->mdio.mdio_write = bnx2x_mdio_write; in bnx2x_init_dev()
13253 if (atomic_read(&pdev->enable_cnt) == 1) in bnx2x_init_dev()
13265 const struct firmware *firmware = bp->firmware; in bnx2x_check_firmware()
13273 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) { in bnx2x_check_firmware()
13275 return -EINVAL; in bnx2x_check_firmware()
13278 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data; in bnx2x_check_firmware()
13286 if (offset + len > firmware->size) { in bnx2x_check_firmware()
13288 return -EINVAL; in bnx2x_check_firmware()
13293 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset); in bnx2x_check_firmware()
13294 ops_offsets = (__force __be16 *)(firmware->data + offset); in bnx2x_check_firmware()
13295 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op); in bnx2x_check_firmware()
13297 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) { in bnx2x_check_firmware()
13300 return -EINVAL; in bnx2x_check_firmware()
13305 offset = be32_to_cpu(fw_hdr->fw_version.offset); in bnx2x_check_firmware()
13306 fw_ver = firmware->data + offset; in bnx2x_check_firmware()
13307 if (fw_ver[0] != bp->fw_major || fw_ver[1] != bp->fw_minor || in bnx2x_check_firmware()
13308 fw_ver[2] != bp->fw_rev || fw_ver[3] != bp->fw_eng) { in bnx2x_check_firmware()
13311 bp->fw_major, bp->fw_minor, bp->fw_rev, bp->fw_eng); in bnx2x_check_firmware()
13312 return -EINVAL; in bnx2x_check_firmware()
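/*
 * Illustrative sketch (not from the driver): the checks above reject a
 * firmware image whose section descriptors (offset/length pairs, big-endian
 * in the blob) point outside the file, and whose embedded version bytes do
 * not match what the driver was built for. The structure below is a
 * hypothetical stand-in and assumes the fields were already byte-swapped.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sketch_section { uint32_t offset; uint32_t len; }; /* CPU-endian */

static int section_in_bounds(const struct sketch_section *s, size_t blob_size)
{
	/* use 64-bit math so offset + len cannot wrap around */
	if ((uint64_t)s->offset + s->len > blob_size)
		return -1;
	return 0;
}

int main(void)
{
	struct sketch_section ok = { 128, 256 }, bad = { 4000, 200 };

	printf("%d %d\n", section_in_bounds(&ok, 4096),
	       section_in_bounds(&bad, 4096)); /* 0 -1 */
	return 0;
}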
13381 u32 len = be32_to_cpu(fw_hdr->arr.len); \
13382 bp->arr = kmalloc(len, GFP_KERNEL); \
13383 if (!bp->arr) \
13385 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
13386 (u8 *)bp->arr, len); \
13395 if (bp->firmware) in bnx2x_init_firmware()
13409 return -EINVAL; in bnx2x_init_firmware()
13414 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev); in bnx2x_init_firmware()
13419 rc = request_firmware(&bp->firmware, fw_file_name_v15, &bp->pdev->dev); in bnx2x_init_firmware()
13424 bp->fw_rev = BCM_5710_FW_REVISION_VERSION_V15; in bnx2x_init_firmware()
13426 bp->fw_cap |= FW_CAP_INVALIDATE_VF_FP_HSI; in bnx2x_init_firmware()
13427 bp->fw_rev = BCM_5710_FW_REVISION_VERSION; in bnx2x_init_firmware()
13430 bp->fw_major = BCM_5710_FW_MAJOR_VERSION; in bnx2x_init_firmware()
13431 bp->fw_minor = BCM_5710_FW_MINOR_VERSION; in bnx2x_init_firmware()
13432 bp->fw_eng = BCM_5710_FW_ENGINEERING_VERSION; in bnx2x_init_firmware()
13440 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data; in bnx2x_init_firmware()
13444 rc = -ENOMEM; in bnx2x_init_firmware()
13455 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data + in bnx2x_init_firmware()
13456 be32_to_cpu(fw_hdr->tsem_int_table_data.offset); in bnx2x_init_firmware()
13457 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data + in bnx2x_init_firmware()
13458 be32_to_cpu(fw_hdr->tsem_pram_data.offset); in bnx2x_init_firmware()
13459 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data + in bnx2x_init_firmware()
13460 be32_to_cpu(fw_hdr->usem_int_table_data.offset); in bnx2x_init_firmware()
13461 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data + in bnx2x_init_firmware()
13462 be32_to_cpu(fw_hdr->usem_pram_data.offset); in bnx2x_init_firmware()
13463 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data + in bnx2x_init_firmware()
13464 be32_to_cpu(fw_hdr->xsem_int_table_data.offset); in bnx2x_init_firmware()
13465 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data + in bnx2x_init_firmware()
13466 be32_to_cpu(fw_hdr->xsem_pram_data.offset); in bnx2x_init_firmware()
13467 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data + in bnx2x_init_firmware()
13468 be32_to_cpu(fw_hdr->csem_int_table_data.offset); in bnx2x_init_firmware()
13469 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data + in bnx2x_init_firmware()
13470 be32_to_cpu(fw_hdr->csem_pram_data.offset); in bnx2x_init_firmware()
13477 kfree(bp->init_ops_offsets); in bnx2x_init_firmware()
13479 kfree(bp->init_ops); in bnx2x_init_firmware()
13481 kfree(bp->init_data); in bnx2x_init_firmware()
13483 release_firmware(bp->firmware); in bnx2x_init_firmware()
13484 bp->firmware = NULL; in bnx2x_init_firmware()
13491 kfree(bp->init_ops_offsets); in bnx2x_release_firmware()
13492 kfree(bp->init_ops); in bnx2x_release_firmware()
13493 kfree(bp->init_data); in bnx2x_release_firmware()
13494 release_firmware(bp->firmware); in bnx2x_release_firmware()
13495 bp->firmware = NULL; in bnx2x_release_firmware()
13520 bnx2x_init_func_obj(bp, &bp->func_obj, in bnx2x__init_func_obj()
13528 /* must be called after sriov-enable */
13543 * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
13554 * If MSI-X is not supported - return number of SBs needed to support in bnx2x_get_num_non_def_sbs()
13557 if (!pdev->msix_cap) { in bnx2x_get_num_non_def_sbs()
13558 dev_info(&pdev->dev, "no msix capability found\n"); in bnx2x_get_num_non_def_sbs()
13561 dev_info(&pdev->dev, "msix capability found\n"); in bnx2x_get_num_non_def_sbs()
13570 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control); in bnx2x_get_num_non_def_sbs()
13607 return -ENODEV; in set_max_cos_est()
13647 func_params.f_obj = &bp->func_obj; in bnx2x_send_update_drift_ramrod()
13651 set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET; in bnx2x_send_update_drift_ramrod()
13652 set_timesync_params->offset_cmd = TS_OFFSET_KEEP; in bnx2x_send_update_drift_ramrod()
13653 set_timesync_params->add_sub_drift_adjust_value = in bnx2x_send_update_drift_ramrod()
13655 set_timesync_params->drift_adjust_value = best_val; in bnx2x_send_update_drift_ramrod()
13656 set_timesync_params->drift_adjust_period = best_period; in bnx2x_send_update_drift_ramrod()
13670 DP(BNX2X_MSG_PTP, "PTP adjfine called, ppb = %d\n", ppb); in bnx2x_ptp_adjfine()
13672 if (!netif_running(bp->dev)) { in bnx2x_ptp_adjfine()
13673 DP(BNX2X_MSG_PTP, in bnx2x_ptp_adjfine()
13675 return -ENETDOWN; in bnx2x_ptp_adjfine()
13679 ppb = -ppb; in bnx2x_ptp_adjfine()
13699 dif1 = ppb - (val * 1000000 / period1); in bnx2x_ptp_adjfine()
13703 dif1 = -dif1; in bnx2x_ptp_adjfine()
13704 dif2 = ppb - (val * 1000000 / period2); in bnx2x_ptp_adjfine()
13706 dif2 = -dif2; in bnx2x_ptp_adjfine()
13721 return -EFAULT; in bnx2x_ptp_adjfine()
13724 DP(BNX2X_MSG_PTP, "Configured val = %d, period = %d\n", best_val, in bnx2x_ptp_adjfine()
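/*
 * Illustrative sketch (not from the driver): the search above approximates
 * the requested frequency offset (in ppb) with the closest achievable
 * (drift value, drift period) pair, minimising |ppb - val * 1000000 / period|.
 * The candidate ranges below are made up for the example, and ppb is assumed
 * non-negative (the code above folds the sign out first).
 */
#include <stdio.h>
#include <stdlib.h>

static void best_drift(int ppb, int *best_val, int *best_period)
{
	int best_dif = ppb;	/* worst case: no correction at all */

	*best_val = 0;
	*best_period = 1;
	for (int period = 1; period <= 4; period++) {
		for (int val = 0; val <= 255; val++) {
			int dif = abs(ppb - val * 1000000 / period);

			if (dif < best_dif) {
				best_dif = dif;
				*best_val = val;
				*best_period = period;
			}
		}
	}
}

int main(void)
{
	int val, period;

	best_drift(12345678, &val, &period);
	printf("val=%d period=%d\n", val, period);
	return 0;
}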
13734 if (!netif_running(bp->dev)) { in bnx2x_ptp_adjtime()
13735 DP(BNX2X_MSG_PTP, in bnx2x_ptp_adjtime()
13737 return -ENETDOWN; in bnx2x_ptp_adjtime()
13740 DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta); in bnx2x_ptp_adjtime()
13742 timecounter_adjtime(&bp->timecounter, delta); in bnx2x_ptp_adjtime()
13752 if (!netif_running(bp->dev)) { in bnx2x_ptp_gettime()
13753 DP(BNX2X_MSG_PTP, in bnx2x_ptp_gettime()
13755 return -ENETDOWN; in bnx2x_ptp_gettime()
13758 ns = timecounter_read(&bp->timecounter); in bnx2x_ptp_gettime()
13760 DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns); in bnx2x_ptp_gettime()
13773 if (!netif_running(bp->dev)) { in bnx2x_ptp_settime()
13774 DP(BNX2X_MSG_PTP, in bnx2x_ptp_settime()
13776 return -ENETDOWN; in bnx2x_ptp_settime()
13781 DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns); in bnx2x_ptp_settime()
13783 /* Re-init the timecounter */ in bnx2x_ptp_settime()
13784 timecounter_init(&bp->timecounter, &bp->cyclecounter, ns); in bnx2x_ptp_settime()
13796 return -ENOTSUPP; in bnx2x_ptp_enable()
13802 bp->ptp_clock_info.owner = THIS_MODULE; in bnx2x_register_phc()
13803 snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name); in bnx2x_register_phc()
13804 bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT; /* In PPB */ in bnx2x_register_phc()
13805 bp->ptp_clock_info.n_alarm = 0; in bnx2x_register_phc()
13806 bp->ptp_clock_info.n_ext_ts = 0; in bnx2x_register_phc()
13807 bp->ptp_clock_info.n_per_out = 0; in bnx2x_register_phc()
13808 bp->ptp_clock_info.pps = 0; in bnx2x_register_phc()
13809 bp->ptp_clock_info.adjfine = bnx2x_ptp_adjfine; in bnx2x_register_phc()
13810 bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime; in bnx2x_register_phc()
13811 bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime; in bnx2x_register_phc()
13812 bp->ptp_clock_info.settime64 = bnx2x_ptp_settime; in bnx2x_register_phc()
13813 bp->ptp_clock_info.enable = bnx2x_ptp_enable; in bnx2x_register_phc()
13815 bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev); in bnx2x_register_phc()
13816 if (IS_ERR(bp->ptp_clock)) { in bnx2x_register_phc()
13817 bp->ptp_clock = NULL; in bnx2x_register_phc()
13834 * to forget previously living interfaces, allowing a proper re-load. in bnx2x_init_one()
13849 * initialization of bp->max_cos based on the chip versions AND chip in bnx2x_init_one()
13852 max_cos_est = set_max_cos_est(ent->driver_data); in bnx2x_init_one()
13855 is_vf = set_is_vf(ent->driver_data); in bnx2x_init_one()
13864 rss_count = max_non_def_sbs - cnic_cnt; in bnx2x_init_one()
13867 return -EINVAL; in bnx2x_init_one()
13880 return -ENOMEM; in bnx2x_init_one()
13884 bp->flags = 0; in bnx2x_init_one()
13886 bp->flags |= IS_VF_FLAG; in bnx2x_init_one()
13888 bp->igu_sb_cnt = max_non_def_sbs; in bnx2x_init_one()
13889 bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM; in bnx2x_init_one()
13890 bp->msg_enable = debug; in bnx2x_init_one()
13891 bp->cnic_support = cnic_cnt; in bnx2x_init_one()
13892 bp->cnic_probe = bnx2x_cnic_probe; in bnx2x_init_one()
13896 rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data); in bnx2x_init_one()
13913 /* Map doorbells here as we need the real value of bp->max_cos which in bnx2x_init_one()
13918 bp->doorbells = bnx2x_vf_doorbells(bp); in bnx2x_init_one()
13925 dev_err(&bp->pdev->dev, in bnx2x_init_one()
13927 rc = -ENOMEM; in bnx2x_init_one()
13930 bp->doorbells = ioremap(pci_resource_start(pdev, 2), in bnx2x_init_one()
13933 if (!bp->doorbells) { in bnx2x_init_one()
13934 dev_err(&bp->pdev->dev, in bnx2x_init_one()
13936 rc = -ENOMEM; in bnx2x_init_one()
13947 if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) { in bnx2x_init_one()
13948 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; in bnx2x_init_one()
13949 dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; in bnx2x_init_one()
13960 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); in bnx2x_init_one()
13961 BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count); in bnx2x_init_one()
13965 bp->flags |= NO_FCOE_FLAG; in bnx2x_init_one()
13967 /* Set bp->num_queues for MSI-X mode*/ in bnx2x_init_one()
13970 /* Configure interrupt mode: try to enable MSI-X/MSI if in bnx2x_init_one()
13975 dev_err(&pdev->dev, "Cannot set interrupts\n"); in bnx2x_init_one()
13983 dev_err(&pdev->dev, "Cannot register net device\n"); in bnx2x_init_one()
13986 BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name); in bnx2x_init_one()
13991 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); in bnx2x_init_one()
13995 "%s (%c%d) PCI-E found at mem %lx, IRQ %d, node addr %pM\n", in bnx2x_init_one()
13996 board_info[ent->driver_data].name, in bnx2x_init_one()
13998 dev->base_addr, bp->pdev->irq, dev->dev_addr); in bnx2x_init_one()
13999 pcie_print_link_status(bp->pdev); in bnx2x_init_one()
14010 if (bp->regview) in bnx2x_init_one()
14011 iounmap(bp->regview); in bnx2x_init_one()
14013 if (IS_PF(bp) && bp->doorbells) in bnx2x_init_one()
14014 iounmap(bp->doorbells); in bnx2x_init_one()
14018 if (atomic_read(&pdev->enable_cnt) == 1) in bnx2x_init_one()
14034 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); in __bnx2x_remove()
14045 (bp->flags & BC_SUPPORTS_RMMOD_CMD)) in __bnx2x_remove()
14048 /* Close the interface - either directly or implicitly */ in __bnx2x_remove()
14070 /* Disable MSI/MSI-X */ in __bnx2x_remove()
14078 cancel_delayed_work_sync(&bp->sp_rtnl_task); in __bnx2x_remove()
14086 pci_wake_from_d3(pdev, bp->wol); in __bnx2x_remove()
14091 if (bp->regview) in __bnx2x_remove()
14092 iounmap(bp->regview); in __bnx2x_remove()
14098 if (bp->doorbells) in __bnx2x_remove()
14099 iounmap(bp->doorbells); in __bnx2x_remove()
14109 if (atomic_read(&pdev->enable_cnt) == 1) in __bnx2x_remove()
14122 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); in bnx2x_remove_one()
14132 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; in bnx2x_eeh_nic_unload()
14134 bp->rx_mode = BNX2X_RX_MODE_NONE; in bnx2x_eeh_nic_unload()
14141 netdev_reset_tc(bp->dev); in bnx2x_eeh_nic_unload()
14143 del_timer_sync(&bp->timer); in bnx2x_eeh_nic_unload()
14144 cancel_delayed_work_sync(&bp->sp_task); in bnx2x_eeh_nic_unload()
14145 cancel_delayed_work_sync(&bp->period_task); in bnx2x_eeh_nic_unload()
14147 if (!down_timeout(&bp->stats_lock, HZ / 10)) { in bnx2x_eeh_nic_unload()
14148 bp->stats_state = STATS_STATE_DISABLED; in bnx2x_eeh_nic_unload()
14149 up(&bp->stats_lock); in bnx2x_eeh_nic_unload()
14154 netif_carrier_off(bp->dev); in bnx2x_eeh_nic_unload()
14160 * bnx2x_io_error_detected - called when PCI error is detected
14198 * bnx2x_io_slot_reset - called after the PCI bus has been reset
14201 * Restart the card from scratch, as if from a cold-boot.
14212 dev_err(&pdev->dev, in bnx2x_io_slot_reset()
14213 "Cannot re-enable PCI device after reset\n"); in bnx2x_io_slot_reset()
14226 BNX2X_ERR("IO slot reset --> driver unload\n"); in bnx2x_io_slot_reset()
14244 if (!bp->nic_stopped) { in bnx2x_io_slot_reset()
14252 bp->nic_stopped = true; in bnx2x_io_slot_reset()
14258 bp->sp_state = 0; in bnx2x_io_slot_reset()
14259 bp->port.pmf = 0; in bnx2x_io_slot_reset()
14269 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); in bnx2x_io_slot_reset()
14273 bp->state = BNX2X_STATE_CLOSED; in bnx2x_io_slot_reset()
14282 * bnx2x_io_resume - called when traffic can start flowing again
14293 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { in bnx2x_io_resume()
14294 netdev_err(bp->dev, "Handling parity error recovery. Try again later\n"); in bnx2x_io_resume()
14300 bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & in bnx2x_io_resume()
14305 netdev_err(bp->dev, "Error during driver initialization, try unloading/reloading the driver\n"); in bnx2x_io_resume()
14365 return -ENOMEM; in bnx2x_init()
14371 return -ENOMEM; in bnx2x_init()
14410 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
14414 * Returns 0 on success, -ENODEV if the ramrod doesn't return.
14421 return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac, in bnx2x_set_iscsi_eth_mac_addr()
14422 &bp->iscsi_l2_mac_obj, true, in bnx2x_set_iscsi_eth_mac_addr()
14433 if (unlikely(bp->panic)) in bnx2x_cnic_sp_post()
14437 spin_lock_bh(&bp->spq_lock); in bnx2x_cnic_sp_post()
14438 BUG_ON(bp->cnic_spq_pending < count); in bnx2x_cnic_sp_post()
14439 bp->cnic_spq_pending -= count; in bnx2x_cnic_sp_post()
14441 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) { in bnx2x_cnic_sp_post()
14442 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type) in bnx2x_cnic_sp_post()
14445 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data) in bnx2x_cnic_sp_post()
14455 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) - in bnx2x_cnic_sp_post()
14458 &bp->context[cxt_index]. in bnx2x_cnic_sp_post()
14471 if (!atomic_read(&bp->cq_spq_left)) in bnx2x_cnic_sp_post()
14474 atomic_dec(&bp->cq_spq_left); in bnx2x_cnic_sp_post()
14476 if (!atomic_read(&bp->eq_spq_left)) in bnx2x_cnic_sp_post()
14479 atomic_dec(&bp->eq_spq_left); in bnx2x_cnic_sp_post()
14482 if (bp->cnic_spq_pending >= in bnx2x_cnic_sp_post()
14483 bp->cnic_eth_dev.max_kwqe_pending) in bnx2x_cnic_sp_post()
14486 bp->cnic_spq_pending++; in bnx2x_cnic_sp_post()
14494 *spe = *bp->cnic_kwq_cons; in bnx2x_cnic_sp_post()
14496 DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n", in bnx2x_cnic_sp_post()
14497 bp->cnic_spq_pending, bp->cnic_kwq_pending, count); in bnx2x_cnic_sp_post()
14499 if (bp->cnic_kwq_cons == bp->cnic_kwq_last) in bnx2x_cnic_sp_post()
14500 bp->cnic_kwq_cons = bp->cnic_kwq; in bnx2x_cnic_sp_post()
14502 bp->cnic_kwq_cons++; in bnx2x_cnic_sp_post()
14505 spin_unlock_bh(&bp->spq_lock); in bnx2x_cnic_sp_post()
14515 if (unlikely(bp->panic)) { in bnx2x_cnic_sp_queue()
14517 return -EIO; in bnx2x_cnic_sp_queue()
14521 if ((bp->recovery_state != BNX2X_RECOVERY_DONE) && in bnx2x_cnic_sp_queue()
14522 (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) { in bnx2x_cnic_sp_queue()
14524 return -EAGAIN; in bnx2x_cnic_sp_queue()
14527 spin_lock_bh(&bp->spq_lock); in bnx2x_cnic_sp_queue()
14532 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT) in bnx2x_cnic_sp_queue()
14535 *bp->cnic_kwq_prod = *spe; in bnx2x_cnic_sp_queue()
14537 bp->cnic_kwq_pending++; in bnx2x_cnic_sp_queue()
14539 DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n", in bnx2x_cnic_sp_queue()
14540 spe->hdr.conn_and_cmd_data, spe->hdr.type, in bnx2x_cnic_sp_queue()
14541 spe->data.update_data_addr.hi, in bnx2x_cnic_sp_queue()
14542 spe->data.update_data_addr.lo, in bnx2x_cnic_sp_queue()
14543 bp->cnic_kwq_pending); in bnx2x_cnic_sp_queue()
14545 if (bp->cnic_kwq_prod == bp->cnic_kwq_last) in bnx2x_cnic_sp_queue()
14546 bp->cnic_kwq_prod = bp->cnic_kwq; in bnx2x_cnic_sp_queue()
14548 bp->cnic_kwq_prod++; in bnx2x_cnic_sp_queue()
14551 spin_unlock_bh(&bp->spq_lock); in bnx2x_cnic_sp_queue()
14553 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending) in bnx2x_cnic_sp_queue()
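/*
 * Illustrative sketch (not from the driver): the cnic_kwq producer/consumer
 * pointers above form a simple array-backed ring - when a pointer reaches
 * the last slot it wraps back to the base instead of advancing. Slot count,
 * locking and payload type below are hypothetical stand-ins.
 */
#include <stdio.h>

#define RING_SLOTS 8

struct slot { int payload; };

static struct slot ring[RING_SLOTS];
static struct slot *prod = ring, *cons = ring;
static int pending;

static int ring_produce(int payload)
{
	if (pending == RING_SLOTS)
		return -1;			/* full */
	prod->payload = payload;
	prod = (prod == &ring[RING_SLOTS - 1]) ? ring : prod + 1;
	pending++;
	return 0;
}

static int ring_consume(int *payload)
{
	if (!pending)
		return -1;			/* empty */
	*payload = cons->payload;
	cons = (cons == &ring[RING_SLOTS - 1]) ? ring : cons + 1;
	pending--;
	return 0;
}

int main(void)
{
	int v;

	for (int i = 0; i < 10; i++)
		ring_produce(i);		/* last two dropped: ring full */
	while (!ring_consume(&v))
		printf("%d ", v);
	printf("\n");
	return 0;
}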
14564 mutex_lock(&bp->cnic_mutex); in bnx2x_cnic_ctl_send()
14565 c_ops = rcu_dereference_protected(bp->cnic_ops, in bnx2x_cnic_ctl_send()
14566 lockdep_is_held(&bp->cnic_mutex)); in bnx2x_cnic_ctl_send()
14568 rc = c_ops->cnic_ctl(bp->cnic_data, ctl); in bnx2x_cnic_ctl_send()
14569 mutex_unlock(&bp->cnic_mutex); in bnx2x_cnic_ctl_send()
14580 c_ops = rcu_dereference(bp->cnic_ops); in bnx2x_cnic_ctl_send_bh()
14582 rc = c_ops->cnic_ctl(bp->cnic_data, ctl); in bnx2x_cnic_ctl_send_bh()
14627 * multicasts (in non-promiscuous mode only one Queue per in bnx2x_set_iscsi_eth_rx_mode()
14637 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state); in bnx2x_set_iscsi_eth_rx_mode()
14642 clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state); in bnx2x_set_iscsi_eth_rx_mode()
14644 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) in bnx2x_set_iscsi_eth_rx_mode()
14645 set_bit(sched_state, &bp->sp_state); in bnx2x_set_iscsi_eth_rx_mode()
14658 switch (ctl->cmd) { in bnx2x_drv_ctl()
14660 u32 index = ctl->data.io.offset; in bnx2x_drv_ctl()
14661 dma_addr_t addr = ctl->data.io.dma_addr; in bnx2x_drv_ctl()
14668 int count = ctl->data.credit.credit_count; in bnx2x_drv_ctl()
14676 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; in bnx2x_drv_ctl()
14680 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj, in bnx2x_drv_ctl()
14681 cp->iscsi_l2_client_id, in bnx2x_drv_ctl()
14682 cp->iscsi_l2_cid, BP_FUNC(bp), in bnx2x_drv_ctl()
14686 &bp->sp_state, BNX2X_OBJ_TYPE_RX, in bnx2x_drv_ctl()
14687 &bp->macs_pool); in bnx2x_drv_ctl()
14731 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj, in bnx2x_drv_ctl()
14736 int count = ctl->data.credit.credit_count; in bnx2x_drv_ctl()
14739 atomic_add(count, &bp->cq_spq_left); in bnx2x_drv_ctl()
14744 int ulp_type = ctl->data.register_data.ulp_type; in bnx2x_drv_ctl()
14764 (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES))) in bnx2x_drv_ctl()
14767 /* If we reached here, we should write the FCoE capabilities */ in bnx2x_drv_ctl()
14773 host_addr = (u32 *) &(ctl->data.register_data. in bnx2x_drv_ctl()
14785 int ulp_type = ctl->data.ulp_type; in bnx2x_drv_ctl()
14803 BNX2X_ERR("unknown command %x\n", ctl->cmd); in bnx2x_drv_ctl()
14804 rc = -EINVAL; in bnx2x_drv_ctl()
14807 /* For storage-only interfaces, change driver state */ in bnx2x_drv_ctl()
14809 switch (ctl->drv_state) { in bnx2x_drv_ctl()
14825 BNX2X_ERR("Unknown cnic driver state: %d\n", ctl->drv_state); in bnx2x_drv_ctl()
14838 int rc = -EINVAL; in bnx2x_get_fc_npiv()
14844 DP(BNX2X_MSG_MCP, "About to read the FC-NPIV table\n"); in bnx2x_get_fc_npiv()
14854 DP(BNX2X_MSG_MCP, "No FC-NPIV in NVRAM\n"); in bnx2x_get_fc_npiv()
14857 DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset); in bnx2x_get_fc_npiv()
14861 BNX2X_ERR("Failed to read FC-NPIV table\n"); in bnx2x_get_fc_npiv()
14868 entries = tbl->fc_npiv_cfg.num_of_npiv; in bnx2x_get_fc_npiv()
14870 tbl->fc_npiv_cfg.num_of_npiv = entries; in bnx2x_get_fc_npiv()
14872 if (!tbl->fc_npiv_cfg.num_of_npiv) { in bnx2x_get_fc_npiv()
14873 DP(BNX2X_MSG_MCP, in bnx2x_get_fc_npiv()
14874 "No FC-NPIV table [valid, simply not present]\n"); in bnx2x_get_fc_npiv()
14876 } else if (tbl->fc_npiv_cfg.num_of_npiv > MAX_NUMBER_NPIV) { in bnx2x_get_fc_npiv()
14877 BNX2X_ERR("FC-NPIV table with bad length 0x%08x\n", in bnx2x_get_fc_npiv()
14878 tbl->fc_npiv_cfg.num_of_npiv); in bnx2x_get_fc_npiv()
14881 DP(BNX2X_MSG_MCP, "Read 0x%08x entries from NVRAM\n", in bnx2x_get_fc_npiv()
14882 tbl->fc_npiv_cfg.num_of_npiv); in bnx2x_get_fc_npiv()
14885 /* Copy the data into cnic-provided struct */ in bnx2x_get_fc_npiv()
14886 cnic_tbl->count = tbl->fc_npiv_cfg.num_of_npiv; in bnx2x_get_fc_npiv()
14887 for (i = 0; i < cnic_tbl->count; i++) { in bnx2x_get_fc_npiv()
14888 memcpy(cnic_tbl->wwpn[i], tbl->settings[i].npiv_wwpn, 8); in bnx2x_get_fc_npiv()
14889 memcpy(cnic_tbl->wwnn[i], tbl->settings[i].npiv_wwnn, 8); in bnx2x_get_fc_npiv()
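/*
 * Illustrative sketch (not from the driver): the code above sanity-checks the
 * NPIV entry count read from NVRAM (zero means "no table", anything above the
 * supported maximum is rejected) before copying the 8-byte WWPN/WWNN pairs
 * into the caller's table. The limit and struct layout are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SKETCH_MAX_NPIV 64

struct sketch_npiv { uint8_t wwpn[8]; uint8_t wwnn[8]; };

static int copy_npiv(struct sketch_npiv *dst, const struct sketch_npiv *src,
		     uint32_t count)
{
	if (!count)
		return 0;			/* valid: table simply absent */
	if (count > SKETCH_MAX_NPIV)
		return -1;			/* corrupt length, reject */
	memcpy(dst, src, count * sizeof(*dst));
	return (int)count;
}

int main(void)
{
	struct sketch_npiv nvram[2] = { { { 0x10 }, { 0x20 } },
					{ { 0x11 }, { 0x21 } } };
	struct sketch_npiv out[SKETCH_MAX_NPIV];

	printf("copied %d entries\n", copy_npiv(out, nvram, 2));
	return 0;
}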
14900 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; in bnx2x_setup_cnic_irq_info()
14902 if (bp->flags & USING_MSIX_FLAG) { in bnx2x_setup_cnic_irq_info()
14903 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX; in bnx2x_setup_cnic_irq_info()
14904 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX; in bnx2x_setup_cnic_irq_info()
14905 cp->irq_arr[0].vector = bp->msix_table[1].vector; in bnx2x_setup_cnic_irq_info()
14907 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; in bnx2x_setup_cnic_irq_info()
14908 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; in bnx2x_setup_cnic_irq_info()
14911 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb; in bnx2x_setup_cnic_irq_info()
14913 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb; in bnx2x_setup_cnic_irq_info()
14915 cp->irq_arr[0].status_blk_map = bp->cnic_sb_mapping; in bnx2x_setup_cnic_irq_info()
14916 cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp); in bnx2x_setup_cnic_irq_info()
14917 cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp); in bnx2x_setup_cnic_irq_info()
14918 cp->irq_arr[1].status_blk = bp->def_status_blk; in bnx2x_setup_cnic_irq_info()
14919 cp->irq_arr[1].status_blk_map = bp->def_status_blk_mapping; in bnx2x_setup_cnic_irq_info()
14920 cp->irq_arr[1].status_blk_num = DEF_SB_ID; in bnx2x_setup_cnic_irq_info()
14921 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID; in bnx2x_setup_cnic_irq_info()
14923 cp->num_irq = 2; in bnx2x_setup_cnic_irq_info()
14928 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; in bnx2x_setup_cnic_info()
14930 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + in bnx2x_setup_cnic_info()
14932 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; in bnx2x_setup_cnic_info()
14933 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); in bnx2x_setup_cnic_info()
14934 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp); in bnx2x_setup_cnic_info()
14936 DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n", in bnx2x_setup_cnic_info()
14937 BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid, in bnx2x_setup_cnic_info()
14938 cp->iscsi_l2_cid); in bnx2x_setup_cnic_info()
14941 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; in bnx2x_setup_cnic_info()
14948 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; in bnx2x_register_cnic()
14951 DP(NETIF_MSG_IFUP, "Register_cnic called\n"); in bnx2x_register_cnic()
14955 return -EINVAL; in bnx2x_register_cnic()
14960 return -EOPNOTSUPP; in bnx2x_register_cnic()
14966 BNX2X_ERR("CNIC-related load failed\n"); in bnx2x_register_cnic()
14971 bp->cnic_enabled = true; in bnx2x_register_cnic()
14973 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL); in bnx2x_register_cnic()
14974 if (!bp->cnic_kwq) in bnx2x_register_cnic()
14975 return -ENOMEM; in bnx2x_register_cnic()
14977 bp->cnic_kwq_cons = bp->cnic_kwq; in bnx2x_register_cnic()
14978 bp->cnic_kwq_prod = bp->cnic_kwq; in bnx2x_register_cnic()
14979 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT; in bnx2x_register_cnic()
14981 bp->cnic_spq_pending = 0; in bnx2x_register_cnic()
14982 bp->cnic_kwq_pending = 0; in bnx2x_register_cnic()
14984 bp->cnic_data = data; in bnx2x_register_cnic()
14986 cp->num_irq = 0; in bnx2x_register_cnic()
14987 cp->drv_state |= CNIC_DRV_STATE_REGD; in bnx2x_register_cnic()
14988 cp->iro_arr = bp->iro_arr; in bnx2x_register_cnic()
14992 rcu_assign_pointer(bp->cnic_ops, ops); in bnx2x_register_cnic()
15003 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; in bnx2x_unregister_cnic()
15005 mutex_lock(&bp->cnic_mutex); in bnx2x_unregister_cnic()
15006 cp->drv_state = 0; in bnx2x_unregister_cnic()
15007 RCU_INIT_POINTER(bp->cnic_ops, NULL); in bnx2x_unregister_cnic()
15008 mutex_unlock(&bp->cnic_mutex); in bnx2x_unregister_cnic()
15010 bp->cnic_enabled = false; in bnx2x_unregister_cnic()
15011 kfree(bp->cnic_kwq); in bnx2x_unregister_cnic()
15012 bp->cnic_kwq = NULL; in bnx2x_unregister_cnic()
15020 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; in bnx2x_cnic_probe()
15022 /* If both iSCSI and FCoE are disabled - return NULL in in bnx2x_cnic_probe()
15029 cp->drv_owner = THIS_MODULE; in bnx2x_cnic_probe()
15030 cp->chip_id = CHIP_ID(bp); in bnx2x_cnic_probe()
15031 cp->pdev = bp->pdev; in bnx2x_cnic_probe()
15032 cp->io_base = bp->regview; in bnx2x_cnic_probe()
15033 cp->io_base2 = bp->doorbells; in bnx2x_cnic_probe()
15034 cp->max_kwqe_pending = 8; in bnx2x_cnic_probe()
15035 cp->ctx_blk_size = CDU_ILT_PAGE_SZ; in bnx2x_cnic_probe()
15036 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + in bnx2x_cnic_probe()
15038 cp->ctx_tbl_len = CNIC_ILT_LINES; in bnx2x_cnic_probe()
15039 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; in bnx2x_cnic_probe()
15040 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue; in bnx2x_cnic_probe()
15041 cp->drv_ctl = bnx2x_drv_ctl; in bnx2x_cnic_probe()
15042 cp->drv_get_fc_npiv_tbl = bnx2x_get_fc_npiv; in bnx2x_cnic_probe()
15043 cp->drv_register_cnic = bnx2x_register_cnic; in bnx2x_cnic_probe()
15044 cp->drv_unregister_cnic = bnx2x_unregister_cnic; in bnx2x_cnic_probe()
15045 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); in bnx2x_cnic_probe()
15046 cp->iscsi_l2_client_id = in bnx2x_cnic_probe()
15048 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp); in bnx2x_cnic_probe()
15051 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; in bnx2x_cnic_probe()
15054 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI; in bnx2x_cnic_probe()
15057 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE; in bnx2x_cnic_probe()
15061 cp->ctx_blk_size, in bnx2x_cnic_probe()
15062 cp->ctx_tbl_offset, in bnx2x_cnic_probe()
15063 cp->ctx_tbl_len, in bnx2x_cnic_probe()
15064 cp->starting_cid); in bnx2x_cnic_probe()
15070 struct bnx2x *bp = fp->bp; in bnx2x_rx_ustorm_prods_offset()
15076 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); in bnx2x_rx_ustorm_prods_offset()
15078 offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id); in bnx2x_rx_ustorm_prods_offset()
15085 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
15093 return -1; in bnx2x_pretend_func()
15113 * still not complete, may indicate an error state - bail out then. in bnx2x_ptp_task()
15136 ns = timecounter_cyc2time(&bp->timecounter, timestamp); in bnx2x_ptp_task()
15140 skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps); in bnx2x_ptp_task()
15142 DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n", in bnx2x_ptp_task()
15145 DP(BNX2X_MSG_PTP, in bnx2x_ptp_task()
15148 bp->eth_stats.ptp_skip_tx_ts++; in bnx2x_ptp_task()
15151 dev_kfree_skb_any(bp->ptp_tx_skb); in bnx2x_ptp_task()
15152 bp->ptp_tx_skb = NULL; in bnx2x_ptp_task()
15170 ns = timecounter_cyc2time(&bp->timecounter, timestamp); in bnx2x_set_rx_ts()
15172 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); in bnx2x_set_rx_ts()
15174 DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n", in bnx2x_set_rx_ts()
15191 DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles); in bnx2x_cyclecounter_read()
15198 memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter)); in bnx2x_init_cyclecounter()
15199 bp->cyclecounter.read = bnx2x_cyclecounter_read; in bnx2x_init_cyclecounter()
15200 bp->cyclecounter.mask = CYCLECOUNTER_MASK(64); in bnx2x_init_cyclecounter()
15201 bp->cyclecounter.shift = 0; in bnx2x_init_cyclecounter()
15202 bp->cyclecounter.mult = 1; in bnx2x_init_cyclecounter()
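/*
 * Illustrative sketch (not from the driver): with mult = 1 and shift = 0 the
 * free-running PHC counter is already in nanoseconds, so the generic
 * cyclecounter conversion (delta * mult) >> shift is an identity here. The
 * tiny timecounter below mirrors how elapsed cycles are folded into a
 * monotonically growing nanosecond count; the struct names are stand-ins for
 * the kernel's cyclecounter/timecounter.
 */
#include <stdint.h>
#include <stdio.h>

struct sketch_cc { uint32_t mult; uint32_t shift; };

struct sketch_tc {
	struct sketch_cc cc;
	uint64_t last_cycles;
	uint64_t nsec;
};

static uint64_t cc_delta_to_ns(const struct sketch_cc *cc, uint64_t delta)
{
	return (delta * cc->mult) >> cc->shift;
}

static uint64_t tc_read(struct sketch_tc *tc, uint64_t now_cycles)
{
	uint64_t delta = now_cycles - tc->last_cycles;

	tc->nsec += cc_delta_to_ns(&tc->cc, delta);
	tc->last_cycles = now_cycles;
	return tc->nsec;
}

int main(void)
{
	struct sketch_tc tc = { .cc = { .mult = 1, .shift = 0 },
				.last_cycles = 1000, .nsec = 0 };

	printf("%llu\n", (unsigned long long)tc_read(&tc, 1500)); /* 500 ns */
	return 0;
}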
15215 func_params.f_obj = &bp->func_obj; in bnx2x_send_reset_timesync_ramrod()
15219 set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET; in bnx2x_send_reset_timesync_ramrod()
15220 set_timesync_params->offset_cmd = TS_OFFSET_KEEP; in bnx2x_send_reset_timesync_ramrod()
15241 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_enable_ptp_packets()
15276 if (!bp->hwtstamp_ioctl_called) in bnx2x_configure_ptp_filters()
15283 switch (bp->tx_type) { in bnx2x_configure_ptp_filters()
15285 bp->flags |= TX_TIMESTAMPING_EN; in bnx2x_configure_ptp_filters()
15291 BNX2X_ERR("One-step timestamping is not supported\n"); in bnx2x_configure_ptp_filters()
15292 return -ERANGE; in bnx2x_configure_ptp_filters()
15299 switch (bp->rx_filter) { in bnx2x_configure_ptp_filters()
15305 bp->rx_filter = HWTSTAMP_FILTER_NONE; in bnx2x_configure_ptp_filters()
15310 bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; in bnx2x_configure_ptp_filters()
15318 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; in bnx2x_configure_ptp_filters()
15326 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; in bnx2x_configure_ptp_filters()
15335 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; in bnx2x_configure_ptp_filters()
15359 DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n"); in bnx2x_hwtstamp_ioctl()
15361 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) in bnx2x_hwtstamp_ioctl()
15362 return -EFAULT; in bnx2x_hwtstamp_ioctl()
15364 DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n", in bnx2x_hwtstamp_ioctl()
15367 bp->hwtstamp_ioctl_called = true; in bnx2x_hwtstamp_ioctl()
15368 bp->tx_type = config.tx_type; in bnx2x_hwtstamp_ioctl()
15369 bp->rx_filter = config.rx_filter; in bnx2x_hwtstamp_ioctl()
15375 config.rx_filter = bp->rx_filter; in bnx2x_hwtstamp_ioctl()
15377 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? in bnx2x_hwtstamp_ioctl()
15378 -EFAULT : 0; in bnx2x_hwtstamp_ioctl()
15387 /* Reset PTP event detection rules - will be configured in the IOCTL */ in bnx2x_configure_ptp()
15397 /* Disable PTP packets to host - will be configured in the IOCTL */ in bnx2x_configure_ptp()
15405 /* Enable the free-running counter */ in bnx2x_configure_ptp()
15414 return -EFAULT; in bnx2x_configure_ptp()
15426 /* Called during load, to initialize PTP-related stuff */
15439 INIT_WORK(&bp->ptp_task, bnx2x_ptp_task); in bnx2x_init_ptp()
15445 if (!bp->timecounter_init_done) { in bnx2x_init_ptp()
15447 timecounter_init(&bp->timecounter, &bp->cyclecounter, in bnx2x_init_ptp()
15449 bp->timecounter_init_done = true; in bnx2x_init_ptp()
15452 DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n"); in bnx2x_init_ptp()