1 // SPDX-License-Identifier: GPL-2.0
33 static int debug = -1;
81 struct net_device *dev = adapter->netdev; in igc_reset()
82 struct igc_hw *hw = &adapter->hw; in igc_reset()
83 struct igc_fc_info *fc = &hw->fc; in igc_reset()
95 * - the full Rx FIFO size minus one full Tx plus one full Rx frame in igc_reset()
97 hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); in igc_reset()
99 fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ in igc_reset()
100 fc->low_water = fc->high_water - 16; in igc_reset()
101 fc->pause_time = 0xFFFF; in igc_reset()
102 fc->send_xon = 1; in igc_reset()
103 fc->current_mode = fc->requested_mode; in igc_reset()
105 hw->mac.ops.reset_hw(hw); in igc_reset()
107 if (hw->mac.ops.init_hw(hw)) in igc_reset()
110 /* Re-establish EEE setting */ in igc_reset()
113 if (!netif_running(adapter->netdev)) in igc_reset()
114 igc_power_down_phy_copper_base(&adapter->hw); in igc_reset()
119 /* Re-enable PTP, where applicable. */ in igc_reset()
122 /* Re-enable TSN offloading, where applicable. */ in igc_reset()
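The watermark arithmetic above can be checked with plain integers. A minimal sketch, assuming a 9216-byte jumbo frame ceiling and a 20 KB Rx packet buffer (pba is expressed in KB, hence the << 10); the constants are illustrative, not values read from hardware.

#include <stdio.h>

#define MAX_JUMBO_FRAME_SIZE 9216	/* assumed jumbo frame ceiling */

int main(void)
{
	unsigned int pba = 20;			/* Rx packet buffer size, in KB */
	unsigned int max_frame_size = 1522;	/* 1500 MTU + Ethernet + VLAN + FCS */
	unsigned int hwm, high_water, low_water;

	/* full Rx FIFO minus one full Tx plus one full Rx frame */
	hwm = (pba << 10) - (max_frame_size + MAX_JUMBO_FRAME_SIZE);

	high_water = hwm & 0xFFFFFFF0;		/* round down to 16-byte granularity */
	low_water = high_water - 16;

	printf("hwm=%u high_water=%u low_water=%u\n", hwm, high_water, low_water);
	return 0;
}

With these numbers the high-water mark lands at 9728 bytes and the low-water mark 16 bytes below it, which is the relationship the excerpt encodes.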
129 * igc_power_up_link - Power up the phy link
134 igc_reset_phy(&adapter->hw); in igc_power_up_link()
136 igc_power_up_phy_copper(&adapter->hw); in igc_power_up_link()
138 igc_setup_link(&adapter->hw); in igc_power_up_link()
142 * igc_release_hw_control - release control of the h/w to f/w
151 struct igc_hw *hw = &adapter->hw; in igc_release_hw_control()
154 if (!pci_device_is_present(adapter->pdev)) in igc_release_hw_control()
164 * igc_get_hw_control - get control of the h/w from f/w
173 struct igc_hw *hw = &adapter->hw; in igc_get_hw_control()
191 * igc_clean_tx_ring - Free Tx Buffers
196 u16 i = tx_ring->next_to_clean; in igc_clean_tx_ring()
197 struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_clean_tx_ring()
200 while (i != tx_ring->next_to_use) { in igc_clean_tx_ring()
203 switch (tx_buffer->type) { in igc_clean_tx_ring()
208 xdp_return_frame(tx_buffer->xdpf); in igc_clean_tx_ring()
209 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_ring()
212 dev_kfree_skb_any(tx_buffer->skb); in igc_clean_tx_ring()
213 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_ring()
216 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); in igc_clean_tx_ring()
221 eop_desc = tx_buffer->next_to_watch; in igc_clean_tx_ring()
229 if (unlikely(i == tx_ring->count)) { in igc_clean_tx_ring()
231 tx_buffer = tx_ring->tx_buffer_info; in igc_clean_tx_ring()
237 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_ring()
240 tx_buffer->next_to_watch = NULL; in igc_clean_tx_ring()
245 if (unlikely(i == tx_ring->count)) { in igc_clean_tx_ring()
247 tx_buffer = tx_ring->tx_buffer_info; in igc_clean_tx_ring()
251 if (tx_ring->xsk_pool && xsk_frames) in igc_clean_tx_ring()
252 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); in igc_clean_tx_ring()
258 memset(tx_ring->tx_buffer_info, 0, in igc_clean_tx_ring()
259 sizeof(*tx_ring->tx_buffer_info) * tx_ring->count); in igc_clean_tx_ring()
262 memset(tx_ring->desc, 0, tx_ring->size); in igc_clean_tx_ring()
265 tx_ring->next_to_use = 0; in igc_clean_tx_ring()
266 tx_ring->next_to_clean = 0; in igc_clean_tx_ring()
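A minimal sketch of the circular walk this cleanup path performs: the running index starts at next_to_clean, stops at next_to_use, and wraps at ring->count. The struct and names below are simplified stand-ins, not driver types.

struct demo_ring {
	unsigned int count;		/* descriptors in the ring */
	unsigned int next_to_clean;	/* first entry still owned by software */
	unsigned int next_to_use;	/* first entry not yet handed out */
	void **buffer_info;		/* per-descriptor bookkeeping */
};

static void demo_release_used(struct demo_ring *ring, void (*release)(void *))
{
	unsigned int i = ring->next_to_clean;

	while (i != ring->next_to_use) {
		release(ring->buffer_info[i]);

		/* advance with wraparound, as the driver loop does */
		if (++i == ring->count)
			i = 0;
	}

	/* the excerpt resets both indices once everything is freed */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}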
270 * igc_free_tx_resources - Free Tx Resources per Queue
279 vfree(tx_ring->tx_buffer_info); in igc_free_tx_resources()
280 tx_ring->tx_buffer_info = NULL; in igc_free_tx_resources()
283 if (!tx_ring->desc) in igc_free_tx_resources()
286 dma_free_coherent(tx_ring->dev, tx_ring->size, in igc_free_tx_resources()
287 tx_ring->desc, tx_ring->dma); in igc_free_tx_resources()
289 tx_ring->desc = NULL; in igc_free_tx_resources()
293 * igc_free_all_tx_resources - Free Tx Resources for All Queues
302 for (i = 0; i < adapter->num_tx_queues; i++) in igc_free_all_tx_resources()
303 igc_free_tx_resources(adapter->tx_ring[i]); in igc_free_all_tx_resources()
307 * igc_clean_all_tx_rings - Free Tx Buffers for all queues
314 for (i = 0; i < adapter->num_tx_queues; i++) in igc_clean_all_tx_rings()
315 if (adapter->tx_ring[i]) in igc_clean_all_tx_rings()
316 igc_clean_tx_ring(adapter->tx_ring[i]); in igc_clean_all_tx_rings()
321 struct igc_hw *hw = &ring->q_vector->adapter->hw; in igc_disable_tx_ring_hw()
322 u8 idx = ring->reg_idx; in igc_disable_tx_ring_hw()
332 * igc_disable_all_tx_rings_hw - Disable all transmit queue operation
339 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_disable_all_tx_rings_hw()
340 struct igc_ring *tx_ring = adapter->tx_ring[i]; in igc_disable_all_tx_rings_hw()
347 * igc_setup_tx_resources - allocate Tx resources (Descriptors)
354 struct net_device *ndev = tx_ring->netdev; in igc_setup_tx_resources()
355 struct device *dev = tx_ring->dev; in igc_setup_tx_resources()
358 size = sizeof(struct igc_tx_buffer) * tx_ring->count; in igc_setup_tx_resources()
359 tx_ring->tx_buffer_info = vzalloc(size); in igc_setup_tx_resources()
360 if (!tx_ring->tx_buffer_info) in igc_setup_tx_resources()
364 tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc); in igc_setup_tx_resources()
365 tx_ring->size = ALIGN(tx_ring->size, 4096); in igc_setup_tx_resources()
367 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in igc_setup_tx_resources()
368 &tx_ring->dma, GFP_KERNEL); in igc_setup_tx_resources()
370 if (!tx_ring->desc) in igc_setup_tx_resources()
373 tx_ring->next_to_use = 0; in igc_setup_tx_resources()
374 tx_ring->next_to_clean = 0; in igc_setup_tx_resources()
379 vfree(tx_ring->tx_buffer_info); in igc_setup_tx_resources()
381 return -ENOMEM; in igc_setup_tx_resources()
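As a rough worked example of the sizing above, assuming a 16-byte advanced Tx descriptor and a 256-entry ring; ALIGN() is re-defined locally for illustration and mirrors the rounding the excerpt applies before dma_alloc_coherent().

#include <stdio.h>
#include <stddef.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	size_t desc_size = 16;		/* assumed sizeof(union igc_adv_tx_desc) */
	size_t count = 256;		/* assumed descriptors per ring */
	size_t size = ALIGN(count * desc_size, 4096);

	printf("%zu descriptors -> %zu bytes of coherent DMA memory\n",
	       count, size);
	return 0;
}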
385 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
392 struct net_device *dev = adapter->netdev; in igc_setup_all_tx_resources()
395 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_setup_all_tx_resources()
396 err = igc_setup_tx_resources(adapter->tx_ring[i]); in igc_setup_all_tx_resources()
399 for (i--; i >= 0; i--) in igc_setup_all_tx_resources()
400 igc_free_tx_resources(adapter->tx_ring[i]); in igc_setup_all_tx_resources()
410 u16 i = rx_ring->next_to_clean; in igc_clean_rx_ring_page_shared()
412 dev_kfree_skb(rx_ring->skb); in igc_clean_rx_ring_page_shared()
413 rx_ring->skb = NULL; in igc_clean_rx_ring_page_shared()
416 while (i != rx_ring->next_to_alloc) { in igc_clean_rx_ring_page_shared()
417 struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; in igc_clean_rx_ring_page_shared()
422 dma_sync_single_range_for_cpu(rx_ring->dev, in igc_clean_rx_ring_page_shared()
423 buffer_info->dma, in igc_clean_rx_ring_page_shared()
424 buffer_info->page_offset, in igc_clean_rx_ring_page_shared()
429 dma_unmap_page_attrs(rx_ring->dev, in igc_clean_rx_ring_page_shared()
430 buffer_info->dma, in igc_clean_rx_ring_page_shared()
434 __page_frag_cache_drain(buffer_info->page, in igc_clean_rx_ring_page_shared()
435 buffer_info->pagecnt_bias); in igc_clean_rx_ring_page_shared()
438 if (i == rx_ring->count) in igc_clean_rx_ring_page_shared()
448 for (i = 0; i < ring->count; i++) { in igc_clean_rx_ring_xsk_pool()
449 bi = &ring->rx_buffer_info[i]; in igc_clean_rx_ring_xsk_pool()
450 if (!bi->xdp) in igc_clean_rx_ring_xsk_pool()
453 xsk_buff_free(bi->xdp); in igc_clean_rx_ring_xsk_pool()
454 bi->xdp = NULL; in igc_clean_rx_ring_xsk_pool()
459 * igc_clean_rx_ring - Free Rx Buffers per Queue
464 if (ring->xsk_pool) in igc_clean_rx_ring()
471 ring->next_to_alloc = 0; in igc_clean_rx_ring()
472 ring->next_to_clean = 0; in igc_clean_rx_ring()
473 ring->next_to_use = 0; in igc_clean_rx_ring()
477 * igc_clean_all_rx_rings - Free Rx Buffers for all queues
484 for (i = 0; i < adapter->num_rx_queues; i++) in igc_clean_all_rx_rings()
485 if (adapter->rx_ring[i]) in igc_clean_all_rx_rings()
486 igc_clean_rx_ring(adapter->rx_ring[i]); in igc_clean_all_rx_rings()
490 * igc_free_rx_resources - Free Rx Resources
499 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in igc_free_rx_resources()
501 vfree(rx_ring->rx_buffer_info); in igc_free_rx_resources()
502 rx_ring->rx_buffer_info = NULL; in igc_free_rx_resources()
505 if (!rx_ring->desc) in igc_free_rx_resources()
508 dma_free_coherent(rx_ring->dev, rx_ring->size, in igc_free_rx_resources()
509 rx_ring->desc, rx_ring->dma); in igc_free_rx_resources()
511 rx_ring->desc = NULL; in igc_free_rx_resources()
515 * igc_free_all_rx_resources - Free Rx Resources for All Queues
524 for (i = 0; i < adapter->num_rx_queues; i++) in igc_free_all_rx_resources()
525 igc_free_rx_resources(adapter->rx_ring[i]); in igc_free_all_rx_resources()
529 * igc_setup_rx_resources - allocate Rx resources (Descriptors)
536 struct net_device *ndev = rx_ring->netdev; in igc_setup_rx_resources()
537 struct device *dev = rx_ring->dev; in igc_setup_rx_resources()
538 u8 index = rx_ring->queue_index; in igc_setup_rx_resources()
541 /* XDP RX-queue info */ in igc_setup_rx_resources()
542 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) in igc_setup_rx_resources()
543 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in igc_setup_rx_resources()
544 res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index, in igc_setup_rx_resources()
545 rx_ring->q_vector->napi.napi_id); in igc_setup_rx_resources()
552 size = sizeof(struct igc_rx_buffer) * rx_ring->count; in igc_setup_rx_resources()
553 rx_ring->rx_buffer_info = vzalloc(size); in igc_setup_rx_resources()
554 if (!rx_ring->rx_buffer_info) in igc_setup_rx_resources()
560 rx_ring->size = rx_ring->count * desc_len; in igc_setup_rx_resources()
561 rx_ring->size = ALIGN(rx_ring->size, 4096); in igc_setup_rx_resources()
563 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in igc_setup_rx_resources()
564 &rx_ring->dma, GFP_KERNEL); in igc_setup_rx_resources()
566 if (!rx_ring->desc) in igc_setup_rx_resources()
569 rx_ring->next_to_alloc = 0; in igc_setup_rx_resources()
570 rx_ring->next_to_clean = 0; in igc_setup_rx_resources()
571 rx_ring->next_to_use = 0; in igc_setup_rx_resources()
576 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in igc_setup_rx_resources()
577 vfree(rx_ring->rx_buffer_info); in igc_setup_rx_resources()
578 rx_ring->rx_buffer_info = NULL; in igc_setup_rx_resources()
580 return -ENOMEM; in igc_setup_rx_resources()
584 * igc_setup_all_rx_resources - wrapper to allocate Rx resources
592 struct net_device *dev = adapter->netdev; in igc_setup_all_rx_resources()
595 for (i = 0; i < adapter->num_rx_queues; i++) { in igc_setup_all_rx_resources()
596 err = igc_setup_rx_resources(adapter->rx_ring[i]); in igc_setup_all_rx_resources()
599 for (i--; i >= 0; i--) in igc_setup_all_rx_resources()
600 igc_free_rx_resources(adapter->rx_ring[i]); in igc_setup_all_rx_resources()
612 !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags)) in igc_get_xsk_pool()
615 return xsk_get_pool_from_qid(ring->netdev, ring->queue_index); in igc_get_xsk_pool()
619 * igc_configure_rx_ring - Configure a receive ring after Reset
628 struct igc_hw *hw = &adapter->hw; in igc_configure_rx_ring()
630 int reg_idx = ring->reg_idx; in igc_configure_rx_ring()
632 u64 rdba = ring->dma; in igc_configure_rx_ring()
635 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); in igc_configure_rx_ring()
636 ring->xsk_pool = igc_get_xsk_pool(adapter, ring); in igc_configure_rx_ring()
637 if (ring->xsk_pool) { in igc_configure_rx_ring()
638 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, in igc_configure_rx_ring()
641 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); in igc_configure_rx_ring()
643 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, in igc_configure_rx_ring()
659 ring->count * sizeof(union igc_adv_rx_desc)); in igc_configure_rx_ring()
662 ring->tail = adapter->io_addr + IGC_RDT(reg_idx); in igc_configure_rx_ring()
664 writel(0, ring->tail); in igc_configure_rx_ring()
666 /* reset next-to-use/clean to place SW in sync with hardware */ in igc_configure_rx_ring()
667 ring->next_to_clean = 0; in igc_configure_rx_ring()
668 ring->next_to_use = 0; in igc_configure_rx_ring()
670 if (ring->xsk_pool) in igc_configure_rx_ring()
671 buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool); in igc_configure_rx_ring()
691 memset(ring->rx_buffer_info, 0, in igc_configure_rx_ring()
692 sizeof(struct igc_rx_buffer) * ring->count); in igc_configure_rx_ring()
696 rx_desc->wb.upper.length = 0; in igc_configure_rx_ring()
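A small sketch of the producer/consumer bookkeeping set up here: software tracks next_to_use and next_to_clean, and publishes the producer index by writing the ring's tail register. A plain variable stands in for the memory-mapped RDT register; none of the names below are driver API.

#include <stdio.h>
#include <stdint.h>

struct demo_ring {
	uint32_t next_to_use;		/* producer index, owned by software */
	uint32_t next_to_clean;		/* consumer index, owned by software */
	volatile uint32_t *tail;	/* stands in for the RDT register */
};

int main(void)
{
	volatile uint32_t rdt = 0;	/* fake tail register */
	struct demo_ring ring = { .tail = &rdt };

	/* after refilling buffers, publish the new producer index */
	ring.next_to_use = 12;
	*ring.tail = ring.next_to_use;	/* writel(i, ring->tail) in the driver */

	printf("hardware sees tail=%u\n", (unsigned int)rdt);
	return 0;
}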
705 * igc_configure_rx - Configure receive Unit after Reset
717 for (i = 0; i < adapter->num_rx_queues; i++) in igc_configure_rx()
718 igc_configure_rx_ring(adapter, adapter->rx_ring[i]); in igc_configure_rx()
722 * igc_configure_tx_ring - Configure transmit ring after Reset
731 struct igc_hw *hw = &adapter->hw; in igc_configure_tx_ring()
732 int reg_idx = ring->reg_idx; in igc_configure_tx_ring()
733 u64 tdba = ring->dma; in igc_configure_tx_ring()
736 ring->xsk_pool = igc_get_xsk_pool(adapter, ring); in igc_configure_tx_ring()
743 ring->count * sizeof(union igc_adv_tx_desc)); in igc_configure_tx_ring()
748 ring->tail = adapter->io_addr + IGC_TDT(reg_idx); in igc_configure_tx_ring()
750 writel(0, ring->tail); in igc_configure_tx_ring()
761 * igc_configure_tx - Configure transmit Unit after Reset
770 for (i = 0; i < adapter->num_tx_queues; i++) in igc_configure_tx()
771 igc_configure_tx_ring(adapter, adapter->tx_ring[i]); in igc_configure_tx()
775 * igc_setup_mrqc - configure the multiple receive queue control registers
780 struct igc_hw *hw = &adapter->hw; in igc_setup_mrqc()
789 num_rx_queues = adapter->rss_queues; in igc_setup_mrqc()
791 if (adapter->rss_indir_tbl_init != num_rx_queues) { in igc_setup_mrqc()
793 adapter->rss_indir_tbl[j] = in igc_setup_mrqc()
795 adapter->rss_indir_tbl_init = num_rx_queues; in igc_setup_mrqc()
821 if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) in igc_setup_mrqc()
823 if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) in igc_setup_mrqc()
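A hedged sketch of the indirection-table fill these lines hint at: table slots are spread evenly over the active Rx queues. The 128-entry table size and the even-spread formula are assumptions made for illustration; the excerpt only shows the assignment being re-done when rss_queues changes.

#include <stdio.h>

#define RETA_SIZE 128	/* assumed indirection table size */

int main(void)
{
	unsigned int reta[RETA_SIZE];
	unsigned int num_rx_queues = 4;
	unsigned int j;

	for (j = 0; j < RETA_SIZE; j++)
		reta[j] = (j * num_rx_queues) / RETA_SIZE;	/* even spread */

	printf("slot 0 -> queue %u, slot %d -> queue %u\n",
	       reta[0], RETA_SIZE - 1, reta[RETA_SIZE - 1]);
	return 0;
}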
832 * igc_setup_rctl - configure the receive control registers
837 struct igc_hw *hw = &adapter->hw; in igc_setup_rctl()
846 (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT); in igc_setup_rctl()
859 /* disable queue 0 to prevent tail write w/o re-config */ in igc_setup_rctl()
863 if (adapter->netdev->features & NETIF_F_RXALL) { in igc_setup_rctl()
879 * igc_setup_tctl - configure the transmit control registers
884 struct igc_hw *hw = &adapter->hw; in igc_setup_tctl()
903 * igc_set_mac_filter_hw() - Set MAC address filter in hardware
908 * @queue: If non-negative, queue assignment feature is enabled and frames
916 struct net_device *dev = adapter->netdev; in igc_set_mac_filter_hw()
917 struct igc_hw *hw = &adapter->hw; in igc_set_mac_filter_hw()
920 if (WARN_ON(index >= hw->mac.rar_entry_count)) in igc_set_mac_filter_hw()
946 * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware
952 struct net_device *dev = adapter->netdev; in igc_clear_mac_filter_hw()
953 struct igc_hw *hw = &adapter->hw; in igc_clear_mac_filter_hw()
955 if (WARN_ON(index >= hw->mac.rar_entry_count)) in igc_clear_mac_filter_hw()
967 struct net_device *dev = adapter->netdev; in igc_set_default_mac_filter()
968 u8 *addr = adapter->hw.mac.addr; in igc_set_default_mac_filter()
972 igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1); in igc_set_default_mac_filter()
976 * igc_set_mac - Change the Ethernet Address of the NIC
985 struct igc_hw *hw = &adapter->hw; in igc_set_mac()
988 if (!is_valid_ether_addr(addr->sa_data)) in igc_set_mac()
989 return -EADDRNOTAVAIL; in igc_set_mac()
991 eth_hw_addr_set(netdev, addr->sa_data); in igc_set_mac()
992 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); in igc_set_mac()
1001 * igc_write_mc_addr_list - write multicast addresses to MTA
1005 * Returns: -ENOMEM on failure
1012 struct igc_hw *hw = &adapter->hw; in igc_write_mc_addr_list()
1025 return -ENOMEM; in igc_write_mc_addr_list()
1030 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); in igc_write_mc_addr_list()
1041 struct igc_adapter *adapter = netdev_priv(ring->netdev); in igc_tx_launchtime()
1042 ktime_t cycle_time = adapter->cycle_time; in igc_tx_launchtime()
1043 ktime_t base_time = adapter->base_time; in igc_tx_launchtime()
1055 if (baset_est != ring->last_ff_cycle) { in igc_tx_launchtime()
1057 ring->last_ff_cycle = baset_est; in igc_tx_launchtime()
1059 if (ktime_compare(end_of_cycle, ring->last_tx_cycle) > 0) in igc_tx_launchtime()
1070 netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n", in igc_tx_launchtime()
1073 ring->last_tx_cycle = end_of_cycle; in igc_tx_launchtime()
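A minimal sketch of the launch-time arithmetic above, using signed 64-bit nanoseconds in place of ktime_t: the requested txtime is expressed as an offset into the current Qbv cycle, whose start is estimated from base_time, cycle_time and the current time. All values are made up for illustration.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t base_time  = 0;		/* cycle origin, ns */
	int64_t cycle_time = 1000000;	/* 1 ms cycle, ns */
	int64_t now        = 12345678;	/* "current" TAI time, ns */
	int64_t txtime     = 12400000;	/* requested launch time, ns */

	int64_t n = (now - base_time) / cycle_time;	/* whole cycles elapsed */
	int64_t cycle_start = base_time + n * cycle_time;
	int64_t end_of_cycle = cycle_start + cycle_time;
	int64_t launchtime = txtime - cycle_start;	/* offset into the cycle */

	printf("cycle %lld: start=%lld end=%lld launch offset=%lld ns\n",
	       (long long)n, (long long)cycle_start,
	       (long long)end_of_cycle, (long long)launchtime);
	return 0;
}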
1093 dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE); in igc_init_empty_frame()
1094 if (dma_mapping_error(ring->dev, dma)) { in igc_init_empty_frame()
1096 netdev_name(ring->netdev)); in igc_init_empty_frame()
1097 return -ENOMEM; in igc_init_empty_frame()
1100 buffer->type = IGC_TX_BUFFER_TYPE_SKB; in igc_init_empty_frame()
1101 buffer->skb = skb; in igc_init_empty_frame()
1102 buffer->protocol = 0; in igc_init_empty_frame()
1103 buffer->bytecount = skb->len; in igc_init_empty_frame()
1104 buffer->gso_segs = 1; in igc_init_empty_frame()
1105 buffer->time_stamp = jiffies; in igc_init_empty_frame()
1106 dma_unmap_len_set(buffer, len, skb->len); in igc_init_empty_frame()
1121 first->bytecount; in igc_init_tx_empty_descriptor()
1122 olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; in igc_init_tx_empty_descriptor()
1124 desc = IGC_TX_DESC(ring, ring->next_to_use); in igc_init_tx_empty_descriptor()
1125 desc->read.cmd_type_len = cpu_to_le32(cmd_type); in igc_init_tx_empty_descriptor()
1126 desc->read.olinfo_status = cpu_to_le32(olinfo_status); in igc_init_tx_empty_descriptor()
1127 desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma)); in igc_init_tx_empty_descriptor()
1129 netdev_tx_sent_queue(txring_txq(ring), skb->len); in igc_init_tx_empty_descriptor()
1131 first->next_to_watch = desc; in igc_init_tx_empty_descriptor()
1133 ring->next_to_use++; in igc_init_tx_empty_descriptor()
1134 if (ring->next_to_use == ring->count) in igc_init_tx_empty_descriptor()
1135 ring->next_to_use = 0; in igc_init_tx_empty_descriptor()
1146 u16 i = tx_ring->next_to_use; in igc_tx_ctxtdesc()
1151 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in igc_tx_ctxtdesc()
1157 if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) in igc_tx_ctxtdesc()
1158 mss_l4len_idx |= tx_ring->reg_idx << 4; in igc_tx_ctxtdesc()
1163 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); in igc_tx_ctxtdesc()
1164 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); in igc_tx_ctxtdesc()
1165 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); in igc_tx_ctxtdesc()
1166 context_desc->launch_time = launch_time; in igc_tx_ctxtdesc()
1172 struct sk_buff *skb = first->skb; in igc_tx_csum()
1176 if (skb->ip_summed != CHECKSUM_PARTIAL) { in igc_tx_csum()
1178 if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) && in igc_tx_csum()
1179 !tx_ring->launchtime_enable) in igc_tx_csum()
1184 switch (skb->csum_offset) { in igc_tx_csum()
1203 first->tx_flags |= IGC_TX_FLAGS_CSUM; in igc_tx_csum()
1204 vlan_macip_lens = skb_checksum_start_offset(skb) - in igc_tx_csum()
1208 vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; in igc_tx_csum()
1216 struct net_device *netdev = tx_ring->netdev; in __igc_maybe_stop_tx()
1218 netif_stop_subqueue(netdev, tx_ring->queue_index); in __igc_maybe_stop_tx()
1227 return -EBUSY; in __igc_maybe_stop_tx()
1230 netif_wake_subqueue(netdev, tx_ring->queue_index); in __igc_maybe_stop_tx()
1232 u64_stats_update_begin(&tx_ring->tx_syncp2); in __igc_maybe_stop_tx()
1233 tx_ring->tx_stats.restart_queue2++; in __igc_maybe_stop_tx()
1234 u64_stats_update_end(&tx_ring->tx_syncp2); in __igc_maybe_stop_tx()
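A sketch of the stop-then-recheck pattern used here: the subqueue is stopped first, a barrier (elided below) makes that visible, and the free-descriptor count is re-read so a racing completion can wake the queue straight back up. The helpers are illustrative stubs, not driver or netdev API.

#include <stdbool.h>

struct demo_txq {
	unsigned int count;
	unsigned int next_to_use;
	unsigned int next_to_clean;
	bool stopped;
};

/* free slots in a circular ring, always leaving one entry as a gap */
static unsigned int demo_desc_unused(const struct demo_txq *q)
{
	return ((q->next_to_clean > q->next_to_use) ? 0 : q->count) +
	       q->next_to_clean - q->next_to_use - 1;
}

static int demo_maybe_stop(struct demo_txq *q, unsigned int needed)
{
	if (demo_desc_unused(q) >= needed)
		return 0;

	q->stopped = true;

	/* in the driver a memory barrier sits here so the cleanup path
	 * sees the stopped state before the count is re-checked */
	if (demo_desc_unused(q) < needed)
		return -1;		/* the driver returns -EBUSY */

	/* a completion raced with us and freed enough entries: resume */
	q->stopped = false;
	return 0;
}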
1282 cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS); in igc_tx_cmd_type()
1305 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); in igc_tx_olinfo_status()
1312 struct sk_buff *skb = first->skb; in igc_tx_map()
1315 u32 tx_flags = first->tx_flags; in igc_tx_map()
1317 u16 i = tx_ring->next_to_use; in igc_tx_map()
1325 igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); in igc_tx_map()
1328 data_len = skb->data_len; in igc_tx_map()
1330 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in igc_tx_map()
1334 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { in igc_tx_map()
1335 if (dma_mapping_error(tx_ring->dev, dma)) in igc_tx_map()
1342 tx_desc->read.buffer_addr = cpu_to_le64(dma); in igc_tx_map()
1345 tx_desc->read.cmd_type_len = in igc_tx_map()
1350 if (i == tx_ring->count) { in igc_tx_map()
1354 tx_desc->read.olinfo_status = 0; in igc_tx_map()
1357 size -= IGC_MAX_DATA_PER_TXD; in igc_tx_map()
1359 tx_desc->read.buffer_addr = cpu_to_le64(dma); in igc_tx_map()
1365 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); in igc_tx_map()
1369 if (i == tx_ring->count) { in igc_tx_map()
1373 tx_desc->read.olinfo_status = 0; in igc_tx_map()
1376 data_len -= size; in igc_tx_map()
1378 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, in igc_tx_map()
1381 tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_tx_map()
1386 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); in igc_tx_map()
1388 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in igc_tx_map()
1391 first->time_stamp = jiffies; in igc_tx_map()
1396 * are new descriptors to fetch. (Only applicable for weak-ordered in igc_tx_map()
1397 * memory model archs, such as IA-64). in igc_tx_map()
1405 first->next_to_watch = tx_desc; in igc_tx_map()
1408 if (i == tx_ring->count) in igc_tx_map()
1411 tx_ring->next_to_use = i; in igc_tx_map()
1417 writel(i, tx_ring->tail); in igc_tx_map()
1422 netdev_err(tx_ring->netdev, "TX DMA map failed\n"); in igc_tx_map()
1423 tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_tx_map()
1428 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_tx_map()
1430 if (i-- == 0) in igc_tx_map()
1431 i += tx_ring->count; in igc_tx_map()
1432 tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_tx_map()
1436 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_tx_map()
1438 dev_kfree_skb_any(tx_buffer->skb); in igc_tx_map()
1439 tx_buffer->skb = NULL; in igc_tx_map()
1441 tx_ring->next_to_use = i; in igc_tx_map()
1443 return -1; in igc_tx_map()
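A worked sketch of the splitting loop above: a fragment larger than the per-descriptor limit is carved into several descriptors, with the ring index wrapping at count. The 32 KB limit is an assumption for illustration only.

#include <stdio.h>

#define MAX_DATA_PER_TXD (1u << 15)	/* assumed 32 KB per descriptor */

int main(void)
{
	unsigned int size = 70000;	/* bytes left in the current fragment */
	unsigned int i = 250;		/* current ring index */
	unsigned int count = 256;	/* ring length */
	unsigned int used = 0;

	while (size > MAX_DATA_PER_TXD) {
		/* emit one descriptor covering MAX_DATA_PER_TXD bytes */
		size -= MAX_DATA_PER_TXD;
		used++;
		if (++i == count)
			i = 0;		/* wrap the ring index */
	}

	/* the remaining bytes go into the descriptor at index i */
	used++;

	printf("fragment used %u descriptors, last one at ring index %u\n",
	       used, i);
	return 0;
}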
1452 struct sk_buff *skb = first->skb; in igc_tso()
1466 if (skb->ip_summed != CHECKSUM_PARTIAL) in igc_tso()
1483 if (ip.v4->version == 4) { in igc_tso()
1485 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); in igc_tso()
1490 ip.v4->check = csum_fold(csum_partial(trans_start, in igc_tso()
1491 csum_start - trans_start, in igc_tso()
1495 ip.v4->tot_len = 0; in igc_tso()
1496 first->tx_flags |= IGC_TX_FLAGS_TSO | in igc_tso()
1500 ip.v6->payload_len = 0; in igc_tso()
1501 first->tx_flags |= IGC_TX_FLAGS_TSO | in igc_tso()
1506 l4_offset = l4.hdr - skb->data; in igc_tso()
1509 paylen = skb->len - l4_offset; in igc_tso()
1512 *hdr_len = (l4.tcp->doff * 4) + l4_offset; in igc_tso()
1513 csum_replace_by_diff(&l4.tcp->check, in igc_tso()
1518 csum_replace_by_diff(&l4.udp->check, in igc_tso()
1523 first->gso_segs = skb_shinfo(skb)->gso_segs; in igc_tso()
1524 first->bytecount += (first->gso_segs - 1) * *hdr_len; in igc_tso()
1527 mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT; in igc_tso()
1528 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT; in igc_tso()
1531 vlan_macip_lens = l4.hdr - ip.hdr; in igc_tso()
1532 vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT; in igc_tso()
1533 vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; in igc_tso()
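A worked example of the TSO byte accounting in these lines: the header is replayed once per extra segment, so the reported bytecount grows by (gso_segs - 1) * hdr_len. The frame sizes below are made up.

#include <stdio.h>

int main(void)
{
	unsigned int skb_len = 64000;	/* bytes handed to the ring */
	unsigned int l4_offset = 34;	/* MAC (14) + IPv4 (20) headers */
	unsigned int tcp_doff = 5;	/* 20-byte TCP header, in 32-bit words */
	unsigned int gso_size = 1448;	/* MSS */

	unsigned int hdr_len = tcp_doff * 4 + l4_offset;	/* 54 bytes */
	unsigned int paylen = skb_len - l4_offset;	/* L4 header plus payload */
	unsigned int data_len = skb_len - hdr_len;	/* TCP payload only */
	unsigned int gso_segs = (data_len + gso_size - 1) / gso_size;
	unsigned int bytecount = skb_len + (gso_segs - 1) * hdr_len;

	printf("hdr_len=%u paylen=%u segs=%u bytecount=%u\n",
	       hdr_len, paylen, gso_segs, bytecount);
	return 0;
}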
1546 struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i]; in igc_request_tx_tstamp()
1548 if (tstamp->skb) in igc_request_tx_tstamp()
1551 tstamp->skb = skb_get(skb); in igc_request_tx_tstamp()
1552 tstamp->start = jiffies; in igc_request_tx_tstamp()
1553 *flags = tstamp->flags; in igc_request_tx_tstamp()
1568 empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in igc_insert_empty_frame()
1572 netdev_name(tx_ring->netdev)); in igc_insert_empty_frame()
1573 return -ENOMEM; in igc_insert_empty_frame()
1598 struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); in igc_xmit_frame_ring()
1617 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) in igc_xmit_frame_ring()
1619 &skb_shinfo(skb)->frags[f])); in igc_xmit_frame_ring()
1626 if (!tx_ring->launchtime_enable) in igc_xmit_frame_ring()
1629 txtime = skb->tstamp; in igc_xmit_frame_ring()
1630 skb->tstamp = ktime_set(0, 0); in igc_xmit_frame_ring()
1648 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in igc_xmit_frame_ring()
1649 first->type = IGC_TX_BUFFER_TYPE_SKB; in igc_xmit_frame_ring()
1650 first->skb = skb; in igc_xmit_frame_ring()
1651 first->bytecount = skb->len; in igc_xmit_frame_ring()
1652 first->gso_segs = 1; in igc_xmit_frame_ring()
1654 if (adapter->qbv_transition || tx_ring->oper_gate_closed) in igc_xmit_frame_ring()
1657 if (tx_ring->max_sdu > 0 && first->bytecount > tx_ring->max_sdu) { in igc_xmit_frame_ring()
1658 adapter->stats.txdrop++; in igc_xmit_frame_ring()
1662 if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) && in igc_xmit_frame_ring()
1663 skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { in igc_xmit_frame_ring()
1667 spin_lock_irqsave(&adapter->ptp_tx_lock, flags); in igc_xmit_frame_ring()
1669 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in igc_xmit_frame_ring()
1671 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_USE_CYCLES) in igc_xmit_frame_ring()
1674 adapter->tx_hwtstamp_skipped++; in igc_xmit_frame_ring()
1677 spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); in igc_xmit_frame_ring()
1686 first->tx_flags = tx_flags; in igc_xmit_frame_ring()
1687 first->protocol = protocol; in igc_xmit_frame_ring()
1700 dev_kfree_skb_any(first->skb); in igc_xmit_frame_ring()
1701 first->skb = NULL; in igc_xmit_frame_ring()
1709 unsigned int r_idx = skb->queue_mapping; in igc_tx_queue_mapping()
1711 if (r_idx >= adapter->num_tx_queues) in igc_tx_queue_mapping()
1712 r_idx = r_idx % adapter->num_tx_queues; in igc_tx_queue_mapping()
1714 return adapter->tx_ring[r_idx]; in igc_tx_queue_mapping()
1725 if (skb->len < 17) { in igc_xmit_frame()
1728 skb->len = 17; in igc_xmit_frame()
1745 if (!(ring->netdev->features & NETIF_F_RXCSUM)) in igc_rx_checksum()
1756 if (!(skb->len == 60 && in igc_rx_checksum()
1757 test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { in igc_rx_checksum()
1758 u64_stats_update_begin(&ring->rx_syncp); in igc_rx_checksum()
1759 ring->rx_stats.csum_err++; in igc_rx_checksum()
1760 u64_stats_update_end(&ring->rx_syncp); in igc_rx_checksum()
1768 skb->ip_summed = CHECKSUM_UNNECESSARY; in igc_rx_checksum()
1770 netdev_dbg(ring->netdev, "cksum success: bits %08X\n", in igc_rx_checksum()
1771 le32_to_cpu(rx_desc->wb.upper.status_error)); in igc_rx_checksum()
1787 [11] = PKT_HASH_TYPE_NONE, /* keep array sized for SW bit-mask */
1798 if (ring->netdev->features & NETIF_F_RXHASH) { in igc_rx_hash()
1799 u32 rss_hash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); in igc_rx_hash()
1810 struct net_device *dev = rx_ring->netdev; in igc_rx_vlan()
1813 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && in igc_rx_vlan()
1816 test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) in igc_rx_vlan()
1817 vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan); in igc_rx_vlan()
1819 vid = le16_to_cpu(rx_desc->wb.upper.vlan); in igc_rx_vlan()
1826 * igc_process_skb_fields - Populate skb header fields from Rx descriptor
1845 skb_record_rx_queue(skb, rx_ring->queue_index); in igc_process_skb_fields()
1847 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in igc_process_skb_fields()
1854 struct igc_hw *hw = &adapter->hw; in igc_vlan_mode()
1871 igc_vlan_mode(adapter->netdev, adapter->netdev->features); in igc_restore_vlan()
1880 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; in igc_get_rx_buffer()
1883 page_count(rx_buffer->page); in igc_get_rx_buffer()
1887 prefetchw(rx_buffer->page); in igc_get_rx_buffer()
1890 dma_sync_single_range_for_cpu(rx_ring->dev, in igc_get_rx_buffer()
1891 rx_buffer->dma, in igc_get_rx_buffer()
1892 rx_buffer->page_offset, in igc_get_rx_buffer()
1896 rx_buffer->pagecnt_bias--; in igc_get_rx_buffer()
1905 buffer->page_offset ^= truesize; in igc_rx_buffer_flip()
1907 buffer->page_offset += truesize; in igc_rx_buffer_flip()
1928 * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
1934 * This function will add the data contained in rx_buffer->page to the skb.
1950 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, in igc_add_rx_frag()
1951 rx_buffer->page_offset, size, truesize); in igc_add_rx_frag()
1960 unsigned int size = xdp->data_end - xdp->data; in igc_build_skb()
1962 unsigned int metasize = xdp->data - xdp->data_meta; in igc_build_skb()
1966 net_prefetch(xdp->data_meta); in igc_build_skb()
1969 skb = napi_build_skb(xdp->data_hard_start, truesize); in igc_build_skb()
1974 skb_reserve(skb, xdp->data - xdp->data_hard_start); in igc_build_skb()
1987 struct xdp_buff *xdp = &ctx->xdp; in igc_construct_skb()
1988 unsigned int metasize = xdp->data - xdp->data_meta; in igc_construct_skb()
1989 unsigned int size = xdp->data_end - xdp->data; in igc_construct_skb()
1991 void *va = xdp->data; in igc_construct_skb()
1996 net_prefetch(xdp->data_meta); in igc_construct_skb()
1999 skb = napi_alloc_skb(&rx_ring->q_vector->napi, in igc_construct_skb()
2004 if (ctx->rx_ts) { in igc_construct_skb()
2005 skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV; in igc_construct_skb()
2006 skb_hwtstamps(skb)->netdev_data = ctx->rx_ts; in igc_construct_skb()
2012 headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN); in igc_construct_skb()
2015 memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta, in igc_construct_skb()
2024 size -= headlen; in igc_construct_skb()
2026 skb_add_rx_frag(skb, 0, rx_buffer->page, in igc_construct_skb()
2027 (va + headlen) - page_address(rx_buffer->page), in igc_construct_skb()
2031 rx_buffer->pagecnt_bias++; in igc_construct_skb()
2038 * igc_reuse_rx_page - page flip buffer and store it back on the ring
2047 u16 nta = rx_ring->next_to_alloc; in igc_reuse_rx_page()
2050 new_buff = &rx_ring->rx_buffer_info[nta]; in igc_reuse_rx_page()
2054 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in igc_reuse_rx_page()
2060 new_buff->dma = old_buff->dma; in igc_reuse_rx_page()
2061 new_buff->page = old_buff->page; in igc_reuse_rx_page()
2062 new_buff->page_offset = old_buff->page_offset; in igc_reuse_rx_page()
2063 new_buff->pagecnt_bias = old_buff->pagecnt_bias; in igc_reuse_rx_page()
2069 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; in igc_can_reuse_rx_page()
2070 struct page *page = rx_buffer->page; in igc_can_reuse_rx_page()
2072 /* avoid re-using remote and pfmemalloc pages */ in igc_can_reuse_rx_page()
2078 if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) in igc_can_reuse_rx_page()
2082 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048) in igc_can_reuse_rx_page()
2084 if (rx_buffer->page_offset > IGC_LAST_OFFSET) in igc_can_reuse_rx_page()
2093 page_ref_add(page, USHRT_MAX - 1); in igc_can_reuse_rx_page()
2094 rx_buffer->pagecnt_bias = USHRT_MAX; in igc_can_reuse_rx_page()
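A minimal sketch of the reference bookkeeping above: the driver takes one large page reference up front and spends it through pagecnt_bias, so per-packet reuse only needs an integer comparison; when the bias runs low the reference count is restocked in a single page_ref_add(). Plain counters stand in for struct page state, and the exact restock threshold is an assumption.

#include <stdbool.h>
#include <limits.h>

struct demo_rx_buffer {
	unsigned int page_refs;		/* stands in for page_count(page) */
	unsigned int pagecnt_bias;	/* references the driver still owns */
};

static bool demo_can_reuse(struct demo_rx_buffer *b)
{
	/* someone outside the driver still holds a reference to the page */
	if (b->page_refs - b->pagecnt_bias > 1)
		return false;

	/* nearly out of locally owned references: restock in one shot */
	if (b->pagecnt_bias == 1) {
		b->page_refs += USHRT_MAX - 1;	/* page_ref_add() in the driver */
		b->pagecnt_bias = USHRT_MAX;
	}

	return true;
}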
2101 * igc_is_non_eop - process handling of non-EOP buffers
2108 * that this is in fact a non-EOP buffer.
2113 u32 ntc = rx_ring->next_to_clean + 1; in igc_is_non_eop()
2116 ntc = (ntc < rx_ring->count) ? ntc : 0; in igc_is_non_eop()
2117 rx_ring->next_to_clean = ntc; in igc_is_non_eop()
2128 * igc_cleanup_headers - Correct corrupted or empty headers
2146 struct net_device *netdev = rx_ring->netdev; in igc_cleanup_headers()
2148 if (!(netdev->features & NETIF_F_RXALL)) { in igc_cleanup_headers()
2172 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in igc_put_rx_buffer()
2175 __page_frag_cache_drain(rx_buffer->page, in igc_put_rx_buffer()
2176 rx_buffer->pagecnt_bias); in igc_put_rx_buffer()
2180 rx_buffer->page = NULL; in igc_put_rx_buffer()
2185 struct igc_adapter *adapter = rx_ring->q_vector->adapter; in igc_rx_offset()
2198 struct page *page = bi->page; in igc_alloc_mapped_page()
2208 rx_ring->rx_stats.alloc_failed++; in igc_alloc_mapped_page()
2209 set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); in igc_alloc_mapped_page()
2214 dma = dma_map_page_attrs(rx_ring->dev, page, 0, in igc_alloc_mapped_page()
2222 if (dma_mapping_error(rx_ring->dev, dma)) { in igc_alloc_mapped_page()
2225 rx_ring->rx_stats.alloc_failed++; in igc_alloc_mapped_page()
2226 set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); in igc_alloc_mapped_page()
2230 bi->dma = dma; in igc_alloc_mapped_page()
2231 bi->page = page; in igc_alloc_mapped_page()
2232 bi->page_offset = igc_rx_offset(rx_ring); in igc_alloc_mapped_page()
2233 page_ref_add(page, USHRT_MAX - 1); in igc_alloc_mapped_page()
2234 bi->pagecnt_bias = USHRT_MAX; in igc_alloc_mapped_page()
2240 * igc_alloc_rx_buffers - Replace used receive buffers; packet split
2247 u16 i = rx_ring->next_to_use; in igc_alloc_rx_buffers()
2256 bi = &rx_ring->rx_buffer_info[i]; in igc_alloc_rx_buffers()
2257 i -= rx_ring->count; in igc_alloc_rx_buffers()
2266 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in igc_alloc_rx_buffers()
2267 bi->page_offset, bufsz, in igc_alloc_rx_buffers()
2271 * because each write-back erases this info. in igc_alloc_rx_buffers()
2273 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in igc_alloc_rx_buffers()
2280 bi = rx_ring->rx_buffer_info; in igc_alloc_rx_buffers()
2281 i -= rx_ring->count; in igc_alloc_rx_buffers()
2285 rx_desc->wb.upper.length = 0; in igc_alloc_rx_buffers()
2287 cleaned_count--; in igc_alloc_rx_buffers()
2290 i += rx_ring->count; in igc_alloc_rx_buffers()
2292 if (rx_ring->next_to_use != i) { in igc_alloc_rx_buffers()
2294 rx_ring->next_to_use = i; in igc_alloc_rx_buffers()
2297 rx_ring->next_to_alloc = i; in igc_alloc_rx_buffers()
2301 * applicable for weak-ordered memory model archs, in igc_alloc_rx_buffers()
2302 * such as IA-64). in igc_alloc_rx_buffers()
2305 writel(i, rx_ring->tail); in igc_alloc_rx_buffers()
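A small sketch of the refill/consume handshake this path relies on: refill clears the write-back length of each descriptor before handing it to hardware, and the Rx cleanup path later treats a non-zero length as a completed descriptor. The descriptor is reduced to a single field for illustration.

#include <stdio.h>
#include <string.h>

struct demo_rx_desc {
	unsigned short wb_length;	/* 0 = still owned by hardware */
};

int main(void)
{
	struct demo_rx_desc ring[4];
	unsigned int i;

	memset(ring, 0, sizeof(ring));	/* refill: write-back length cleared */

	ring[1].wb_length = 1514;	/* pretend hardware wrote one frame back */

	for (i = 0; i < 4; i++) {
		if (!ring[i].wb_length)
			continue;	/* not completed yet */
		printf("descriptor %u completed, %u bytes\n",
		       i, ring[i].wb_length);
	}
	return 0;
}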
2312 u16 i = ring->next_to_use; in igc_alloc_rx_buffers_zc()
2323 bi = &ring->rx_buffer_info[i]; in igc_alloc_rx_buffers_zc()
2324 i -= ring->count; in igc_alloc_rx_buffers_zc()
2327 bi->xdp = xsk_buff_alloc(ring->xsk_pool); in igc_alloc_rx_buffers_zc()
2328 if (!bi->xdp) { in igc_alloc_rx_buffers_zc()
2333 dma = xsk_buff_xdp_get_dma(bi->xdp); in igc_alloc_rx_buffers_zc()
2334 desc->read.pkt_addr = cpu_to_le64(dma); in igc_alloc_rx_buffers_zc()
2341 bi = ring->rx_buffer_info; in igc_alloc_rx_buffers_zc()
2342 i -= ring->count; in igc_alloc_rx_buffers_zc()
2346 desc->wb.upper.length = 0; in igc_alloc_rx_buffers_zc()
2348 count--; in igc_alloc_rx_buffers_zc()
2351 i += ring->count; in igc_alloc_rx_buffers_zc()
2353 if (ring->next_to_use != i) { in igc_alloc_rx_buffers_zc()
2354 ring->next_to_use = i; in igc_alloc_rx_buffers_zc()
2358 * applicable for weak-ordered memory model archs, in igc_alloc_rx_buffers_zc()
2359 * such as IA-64). in igc_alloc_rx_buffers_zc()
2362 writel(i, ring->tail); in igc_alloc_rx_buffers_zc()
2373 u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0; in igc_xdp_init_tx_descriptor()
2374 u16 count, index = ring->next_to_use; in igc_xdp_init_tx_descriptor()
2375 struct igc_tx_buffer *head = &ring->tx_buffer_info[index]; in igc_xdp_init_tx_descriptor()
2378 u32 olinfo_status, len = xdpf->len, cmd_type; in igc_xdp_init_tx_descriptor()
2379 void *data = xdpf->data; in igc_xdp_init_tx_descriptor()
2384 count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i])); in igc_xdp_init_tx_descriptor()
2388 return -EBUSY; in igc_xdp_init_tx_descriptor()
2392 head->bytecount = xdp_get_frame_len(xdpf); in igc_xdp_init_tx_descriptor()
2393 head->type = IGC_TX_BUFFER_TYPE_XDP; in igc_xdp_init_tx_descriptor()
2394 head->gso_segs = 1; in igc_xdp_init_tx_descriptor()
2395 head->xdpf = xdpf; in igc_xdp_init_tx_descriptor()
2397 olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; in igc_xdp_init_tx_descriptor()
2398 desc->read.olinfo_status = cpu_to_le32(olinfo_status); in igc_xdp_init_tx_descriptor()
2403 dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE); in igc_xdp_init_tx_descriptor()
2404 if (dma_mapping_error(ring->dev, dma)) { in igc_xdp_init_tx_descriptor()
2405 netdev_err_once(ring->netdev, in igc_xdp_init_tx_descriptor()
2416 desc->read.cmd_type_len = cpu_to_le32(cmd_type); in igc_xdp_init_tx_descriptor()
2417 desc->read.buffer_addr = cpu_to_le64(dma); in igc_xdp_init_tx_descriptor()
2419 buffer->protocol = 0; in igc_xdp_init_tx_descriptor()
2421 if (++index == ring->count) in igc_xdp_init_tx_descriptor()
2427 buffer = &ring->tx_buffer_info[index]; in igc_xdp_init_tx_descriptor()
2429 desc->read.olinfo_status = 0; in igc_xdp_init_tx_descriptor()
2431 data = skb_frag_address(&sinfo->frags[i]); in igc_xdp_init_tx_descriptor()
2432 len = skb_frag_size(&sinfo->frags[i]); in igc_xdp_init_tx_descriptor()
2435 desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD); in igc_xdp_init_tx_descriptor()
2437 netdev_tx_sent_queue(txring_txq(ring), head->bytecount); in igc_xdp_init_tx_descriptor()
2439 head->time_stamp = jiffies; in igc_xdp_init_tx_descriptor()
2441 head->next_to_watch = desc; in igc_xdp_init_tx_descriptor()
2442 ring->next_to_use = index; in igc_xdp_init_tx_descriptor()
2448 buffer = &ring->tx_buffer_info[index]; in igc_xdp_init_tx_descriptor()
2450 dma_unmap_page(ring->dev, in igc_xdp_init_tx_descriptor()
2459 index += ring->count; in igc_xdp_init_tx_descriptor()
2460 index--; in igc_xdp_init_tx_descriptor()
2463 return -ENOMEM; in igc_xdp_init_tx_descriptor()
2474 while (index >= adapter->num_tx_queues) in igc_xdp_get_tx_ring()
2475 index -= adapter->num_tx_queues; in igc_xdp_get_tx_ring()
2477 return adapter->tx_ring[index]; in igc_xdp_get_tx_ring()
2489 return -EFAULT; in igc_xdp_xmit_back()
2517 if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0) in __igc_xdp_run_prog()
2522 bpf_warn_invalid_xdp_action(adapter->netdev, prog, act); in __igc_xdp_run_prog()
2526 trace_xdp_exception(adapter->netdev, prog, act); in __igc_xdp_run_prog()
2538 prog = READ_ONCE(adapter->xdp_prog); in igc_xdp_run_prog()
2558 writel(ring->next_to_use, ring->tail); in igc_flush_tx_descriptors()
2583 struct igc_ring *ring = q_vector->rx.ring; in igc_update_rx_stats()
2585 u64_stats_update_begin(&ring->rx_syncp); in igc_update_rx_stats()
2586 ring->rx_stats.packets += packets; in igc_update_rx_stats()
2587 ring->rx_stats.bytes += bytes; in igc_update_rx_stats()
2588 u64_stats_update_end(&ring->rx_syncp); in igc_update_rx_stats()
2590 q_vector->rx.total_packets += packets; in igc_update_rx_stats()
2591 q_vector->rx.total_bytes += bytes; in igc_update_rx_stats()
2597 struct igc_adapter *adapter = q_vector->adapter; in igc_clean_rx_irq()
2598 struct igc_ring *rx_ring = q_vector->rx.ring; in igc_clean_rx_irq()
2599 struct sk_buff *skb = rx_ring->skb; in igc_clean_rx_irq()
2618 rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean); in igc_clean_rx_irq()
2619 size = le16_to_cpu(rx_desc->wb.upper.length); in igc_clean_rx_irq()
2632 pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; in igc_clean_rx_irq()
2637 size -= IGC_TS_HDR_LEN; in igc_clean_rx_irq()
2641 xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq); in igc_clean_rx_irq()
2642 xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring), in igc_clean_rx_irq()
2654 rx_buffer->pagecnt_bias++; in igc_clean_rx_irq()
2674 rx_ring->rx_stats.alloc_failed++; in igc_clean_rx_irq()
2675 rx_buffer->pagecnt_bias++; in igc_clean_rx_irq()
2676 set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); in igc_clean_rx_irq()
2683 /* fetch next buffer in frame if non-eop */ in igc_clean_rx_irq()
2694 total_bytes += skb->len; in igc_clean_rx_irq()
2699 napi_gro_receive(&q_vector->napi, skb); in igc_clean_rx_irq()
2712 rx_ring->skb = skb; in igc_clean_rx_irq()
2725 struct xdp_buff *xdp = &ctx->xdp; in igc_construct_skb_zc()
2726 unsigned int totalsize = xdp->data_end - xdp->data_meta; in igc_construct_skb_zc()
2727 unsigned int metasize = xdp->data - xdp->data_meta; in igc_construct_skb_zc()
2730 net_prefetch(xdp->data_meta); in igc_construct_skb_zc()
2732 skb = napi_alloc_skb(&ring->q_vector->napi, totalsize); in igc_construct_skb_zc()
2736 memcpy(__skb_put(skb, totalsize), xdp->data_meta, in igc_construct_skb_zc()
2744 if (ctx->rx_ts) { in igc_construct_skb_zc()
2745 skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV; in igc_construct_skb_zc()
2746 skb_hwtstamps(skb)->netdev_data = ctx->rx_ts; in igc_construct_skb_zc()
2756 struct igc_ring *ring = q_vector->rx.ring; in igc_dispatch_skb_zc()
2761 ring->rx_stats.alloc_failed++; in igc_dispatch_skb_zc()
2762 set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &ring->flags); in igc_dispatch_skb_zc()
2770 napi_gro_receive(&q_vector->napi, skb); in igc_dispatch_skb_zc()
2777 * igc_xdp_buff fields fall into xdp_buff_xsk->cb in xsk_buff_to_igc_ctx()
2784 struct igc_adapter *adapter = q_vector->adapter; in igc_clean_rx_irq_zc()
2785 struct igc_ring *ring = q_vector->rx.ring; in igc_clean_rx_irq_zc()
2788 u16 ntc = ring->next_to_clean; in igc_clean_rx_irq_zc()
2795 prog = READ_ONCE(adapter->xdp_prog); in igc_clean_rx_irq_zc()
2805 size = le16_to_cpu(desc->wb.upper.length); in igc_clean_rx_irq_zc()
2815 bi = &ring->rx_buffer_info[ntc]; in igc_clean_rx_irq_zc()
2817 ctx = xsk_buff_to_igc_ctx(bi->xdp); in igc_clean_rx_irq_zc()
2818 ctx->rx_desc = desc; in igc_clean_rx_irq_zc()
2821 ctx->rx_ts = bi->xdp->data; in igc_clean_rx_irq_zc()
2823 bi->xdp->data += IGC_TS_HDR_LEN; in igc_clean_rx_irq_zc()
2828 bi->xdp->data_meta += IGC_TS_HDR_LEN; in igc_clean_rx_irq_zc()
2829 size -= IGC_TS_HDR_LEN; in igc_clean_rx_irq_zc()
2831 ctx->rx_ts = NULL; in igc_clean_rx_irq_zc()
2834 bi->xdp->data_end = bi->xdp->data + size; in igc_clean_rx_irq_zc()
2835 xsk_buff_dma_sync_for_cpu(bi->xdp); in igc_clean_rx_irq_zc()
2837 res = __igc_xdp_run_prog(adapter, prog, bi->xdp); in igc_clean_rx_irq_zc()
2843 xsk_buff_free(bi->xdp); in igc_clean_rx_irq_zc()
2851 bi->xdp = NULL; in igc_clean_rx_irq_zc()
2856 if (ntc == ring->count) in igc_clean_rx_irq_zc()
2860 ring->next_to_clean = ntc; in igc_clean_rx_irq_zc()
2871 if (xsk_uses_need_wakeup(ring->xsk_pool)) { in igc_clean_rx_irq_zc()
2872 if (failure || ring->next_to_clean == ring->next_to_use) in igc_clean_rx_irq_zc()
2873 xsk_set_rx_need_wakeup(ring->xsk_pool); in igc_clean_rx_irq_zc()
2875 xsk_clear_rx_need_wakeup(ring->xsk_pool); in igc_clean_rx_irq_zc()
2885 struct igc_ring *ring = q_vector->tx.ring; in igc_update_tx_stats()
2887 u64_stats_update_begin(&ring->tx_syncp); in igc_update_tx_stats()
2888 ring->tx_stats.bytes += bytes; in igc_update_tx_stats()
2889 ring->tx_stats.packets += packets; in igc_update_tx_stats()
2890 u64_stats_update_end(&ring->tx_syncp); in igc_update_tx_stats()
2892 q_vector->tx.total_bytes += bytes; in igc_update_tx_stats()
2893 q_vector->tx.total_packets += packets; in igc_update_tx_stats()
2899 struct igc_ring *tx_ring = meta_req->tx_ring; in igc_xsk_request_timestamp()
2907 if (test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags)) { in igc_xsk_request_timestamp()
2908 adapter = netdev_priv(tx_ring->netdev); in igc_xsk_request_timestamp()
2910 spin_lock_irqsave(&adapter->ptp_tx_lock, lock_flags); in igc_xsk_request_timestamp()
2914 tstamp = &adapter->tx_tstamp[i]; in igc_xsk_request_timestamp()
2916 /* tstamp->skb and tstamp->xsk_tx_buffer are in union. in igc_xsk_request_timestamp()
2917 * When tstamp->skb is equal to NULL, in igc_xsk_request_timestamp()
2918 * tstamp->xsk_tx_buffer is equal to NULL as well. in igc_xsk_request_timestamp()
2922 if (!tstamp->skb) { in igc_xsk_request_timestamp()
2930 adapter->tx_hwtstamp_skipped++; in igc_xsk_request_timestamp()
2931 spin_unlock_irqrestore(&adapter->ptp_tx_lock, in igc_xsk_request_timestamp()
2936 tstamp->start = jiffies; in igc_xsk_request_timestamp()
2937 tstamp->xsk_queue_index = tx_ring->queue_index; in igc_xsk_request_timestamp()
2938 tstamp->xsk_tx_buffer = meta_req->tx_buffer; in igc_xsk_request_timestamp()
2939 tstamp->buffer_type = IGC_TX_BUFFER_TYPE_XSK; in igc_xsk_request_timestamp()
2942 meta_req->tx_buffer->xsk_pending_ts = true; in igc_xsk_request_timestamp()
2948 xsk_tx_metadata_to_compl(meta_req->meta, &tstamp->xsk_meta); in igc_xsk_request_timestamp()
2951 tx_flags |= tstamp->flags; in igc_xsk_request_timestamp()
2952 meta_req->cmd_type |= IGC_SET_FLAG(tx_flags, in igc_xsk_request_timestamp()
2955 meta_req->cmd_type |= IGC_SET_FLAG(tx_flags, in igc_xsk_request_timestamp()
2958 meta_req->cmd_type |= IGC_SET_FLAG(tx_flags, in igc_xsk_request_timestamp()
2961 meta_req->cmd_type |= IGC_SET_FLAG(tx_flags, in igc_xsk_request_timestamp()
2965 spin_unlock_irqrestore(&adapter->ptp_tx_lock, lock_flags); in igc_xsk_request_timestamp()
2977 struct igc_ring *tx_ring = meta_req->tx_ring; in igc_xsk_request_launch_time()
2983 if (!tx_ring->launchtime_enable) in igc_xsk_request_launch_time()
2996 meta_req->tx_buffer = in igc_xsk_request_launch_time()
2997 &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in igc_xsk_request_launch_time()
3009 meta_req->used_desc += used_desc; in igc_xsk_request_launch_time()
3020 struct xsk_buff_pool *pool = ring->xsk_pool; in igc_xdp_xmit_zc()
3027 if (!netif_carrier_ok(ring->netdev)) in igc_xdp_xmit_zc()
3035 ntu = ring->next_to_use; in igc_xdp_xmit_zc()
3060 bi = &ring->tx_buffer_info[ntu]; in igc_xdp_xmit_zc()
3070 ntu = ring->next_to_use; in igc_xdp_xmit_zc()
3076 budget -= meta_req.used_desc; in igc_xdp_xmit_zc()
3079 tx_desc->read.cmd_type_len = cpu_to_le32(meta_req.cmd_type); in igc_xdp_xmit_zc()
3080 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); in igc_xdp_xmit_zc()
3081 tx_desc->read.buffer_addr = cpu_to_le64(dma); in igc_xdp_xmit_zc()
3083 bi->type = IGC_TX_BUFFER_TYPE_XSK; in igc_xdp_xmit_zc()
3084 bi->protocol = 0; in igc_xdp_xmit_zc()
3085 bi->bytecount = xdp_desc.len; in igc_xdp_xmit_zc()
3086 bi->gso_segs = 1; in igc_xdp_xmit_zc()
3087 bi->time_stamp = jiffies; in igc_xdp_xmit_zc()
3088 bi->next_to_watch = tx_desc; in igc_xdp_xmit_zc()
3093 if (ntu == ring->count) in igc_xdp_xmit_zc()
3096 ring->next_to_use = ntu; in igc_xdp_xmit_zc()
3097 budget--; in igc_xdp_xmit_zc()
3109 * igc_clean_tx_irq - Reclaim resources after transmit completes
3117 struct igc_adapter *adapter = q_vector->adapter; in igc_clean_tx_irq()
3119 unsigned int budget = q_vector->tx.work_limit; in igc_clean_tx_irq()
3120 struct igc_ring *tx_ring = q_vector->tx.ring; in igc_clean_tx_irq()
3121 unsigned int i = tx_ring->next_to_clean; in igc_clean_tx_irq()
3126 if (test_bit(__IGC_DOWN, &adapter->state)) in igc_clean_tx_irq()
3129 tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_clean_tx_irq()
3131 i -= tx_ring->count; in igc_clean_tx_irq()
3134 union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; in igc_clean_tx_irq()
3144 if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD))) in igc_clean_tx_irq()
3150 if (tx_buffer->type == IGC_TX_BUFFER_TYPE_XSK && in igc_clean_tx_irq()
3151 tx_buffer->xsk_pending_ts) in igc_clean_tx_irq()
3155 tx_buffer->next_to_watch = NULL; in igc_clean_tx_irq()
3158 total_bytes += tx_buffer->bytecount; in igc_clean_tx_irq()
3159 total_packets += tx_buffer->gso_segs; in igc_clean_tx_irq()
3161 switch (tx_buffer->type) { in igc_clean_tx_irq()
3166 xdp_return_frame(tx_buffer->xdpf); in igc_clean_tx_irq()
3167 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_irq()
3170 napi_consume_skb(tx_buffer->skb, napi_budget); in igc_clean_tx_irq()
3171 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_irq()
3174 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); in igc_clean_tx_irq()
3184 i -= tx_ring->count; in igc_clean_tx_irq()
3185 tx_buffer = tx_ring->tx_buffer_info; in igc_clean_tx_irq()
3191 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_irq()
3199 i -= tx_ring->count; in igc_clean_tx_irq()
3200 tx_buffer = tx_ring->tx_buffer_info; in igc_clean_tx_irq()
3208 budget--; in igc_clean_tx_irq()
3214 i += tx_ring->count; in igc_clean_tx_irq()
3215 tx_ring->next_to_clean = i; in igc_clean_tx_irq()
3219 if (tx_ring->xsk_pool) { in igc_clean_tx_irq()
3221 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); in igc_clean_tx_irq()
3222 if (xsk_uses_need_wakeup(tx_ring->xsk_pool)) in igc_clean_tx_irq()
3223 xsk_set_tx_need_wakeup(tx_ring->xsk_pool); in igc_clean_tx_irq()
3227 if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { in igc_clean_tx_irq()
3228 struct igc_hw *hw = &adapter->hw; in igc_clean_tx_irq()
3233 clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); in igc_clean_tx_irq()
3234 if (tx_buffer->next_to_watch && in igc_clean_tx_irq()
3235 time_after(jiffies, tx_buffer->time_stamp + in igc_clean_tx_irq()
3236 (adapter->tx_timeout_factor * HZ)) && in igc_clean_tx_irq()
3238 (rd32(IGC_TDH(tx_ring->reg_idx)) != readl(tx_ring->tail)) && in igc_clean_tx_irq()
3239 !tx_ring->oper_gate_closed) { in igc_clean_tx_irq()
3241 netdev_err(tx_ring->netdev, in igc_clean_tx_irq()
3253 tx_ring->queue_index, in igc_clean_tx_irq()
3254 rd32(IGC_TDH(tx_ring->reg_idx)), in igc_clean_tx_irq()
3255 readl(tx_ring->tail), in igc_clean_tx_irq()
3256 tx_ring->next_to_use, in igc_clean_tx_irq()
3257 tx_ring->next_to_clean, in igc_clean_tx_irq()
3258 tx_buffer->time_stamp, in igc_clean_tx_irq()
3259 tx_buffer->next_to_watch, in igc_clean_tx_irq()
3261 tx_buffer->next_to_watch->wb.status); in igc_clean_tx_irq()
3262 netif_stop_subqueue(tx_ring->netdev, in igc_clean_tx_irq()
3263 tx_ring->queue_index); in igc_clean_tx_irq()
3272 netif_carrier_ok(tx_ring->netdev) && in igc_clean_tx_irq()
3278 if (__netif_subqueue_stopped(tx_ring->netdev, in igc_clean_tx_irq()
3279 tx_ring->queue_index) && in igc_clean_tx_irq()
3280 !(test_bit(__IGC_DOWN, &adapter->state))) { in igc_clean_tx_irq()
3281 netif_wake_subqueue(tx_ring->netdev, in igc_clean_tx_irq()
3282 tx_ring->queue_index); in igc_clean_tx_irq()
3284 u64_stats_update_begin(&tx_ring->tx_syncp); in igc_clean_tx_irq()
3285 tx_ring->tx_stats.restart_queue++; in igc_clean_tx_irq()
3286 u64_stats_update_end(&tx_ring->tx_syncp); in igc_clean_tx_irq()
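A short sketch of the completion test this cleanup loop relies on: work stops at the first end-of-packet descriptor whose DD (descriptor done) status bit hardware has not set yet. The constant value and layout below are simplified stand-ins.

#include <stdbool.h>
#include <stdint.h>

#define DEMO_STAT_DD 0x00000001u	/* stands in for IGC_TXD_STAT_DD */

struct demo_tx_desc {
	uint32_t wb_status;		/* write-back status word */
};

static bool demo_tx_done(const struct demo_tx_desc *eop_desc)
{
	/* hardware sets DD once the whole packet, fragments included,
	 * has been fetched and sent */
	return eop_desc->wb_status & DEMO_STAT_DD;
}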
3296 struct igc_hw *hw = &adapter->hw; in igc_find_mac_filter()
3297 int max_entries = hw->mac.rar_entry_count; in igc_find_mac_filter()
3318 return -1; in igc_find_mac_filter()
3323 struct igc_hw *hw = &adapter->hw; in igc_get_avail_mac_filter_slot()
3324 int max_entries = hw->mac.rar_entry_count; in igc_get_avail_mac_filter_slot()
3335 return -1; in igc_get_avail_mac_filter_slot()
3339 * igc_add_mac_filter() - Add MAC address filter
3343 * @queue: If non-negative, queue assignment feature is enabled and frames
3353 struct net_device *dev = adapter->netdev; in igc_add_mac_filter()
3362 return -ENOSPC; in igc_add_mac_filter()
3374 * igc_del_mac_filter() - Delete MAC address filter
3382 struct net_device *dev = adapter->netdev; in igc_del_mac_filter()
3396 igc_set_mac_filter_hw(adapter, 0, type, addr, -1); in igc_del_mac_filter()
3408 * igc_add_vlan_prio_filter() - Add VLAN priority filter
3418 struct net_device *dev = adapter->netdev; in igc_add_vlan_prio_filter()
3419 struct igc_hw *hw = &adapter->hw; in igc_add_vlan_prio_filter()
3426 return -EEXIST; in igc_add_vlan_prio_filter()
3440 * igc_del_vlan_prio_filter() - Delete VLAN priority filter
3446 struct igc_hw *hw = &adapter->hw; in igc_del_vlan_prio_filter()
3456 netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n", in igc_del_vlan_prio_filter()
3462 struct igc_hw *hw = &adapter->hw; in igc_get_avail_etype_filter_slot()
3472 return -1; in igc_get_avail_etype_filter_slot()
3476 * igc_add_etype_filter() - Add ethertype filter
3479 * @queue: If non-negative, queue assignment feature is enabled and frames
3488 struct igc_hw *hw = &adapter->hw; in igc_add_etype_filter()
3494 return -ENOSPC; in igc_add_etype_filter()
3511 netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n", in igc_add_etype_filter()
3518 struct igc_hw *hw = &adapter->hw; in igc_find_etype_filter()
3528 return -1; in igc_find_etype_filter()
3532 * igc_del_etype_filter() - Delete ethertype filter
3538 struct igc_hw *hw = &adapter->hw; in igc_del_etype_filter()
3547 netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n", in igc_del_etype_filter()
3555 struct igc_hw *hw = &adapter->hw; in igc_flex_filter_select()
3559 if (input->index >= MAX_FLEX_FILTER) { in igc_flex_filter_select()
3560 netdev_err(adapter->netdev, "Wrong Flex Filter index selected!\n"); in igc_flex_filter_select()
3561 return -EINVAL; in igc_flex_filter_select()
3567 switch (input->index) { in igc_flex_filter_select()
3584 fhft_index = input->index % 8; in igc_flex_filter_select()
3587 IGC_FHFT_EXT(fhft_index - 4); in igc_flex_filter_select()
3595 struct igc_hw *hw = &adapter->hw; in igc_write_flex_filter_ll()
3596 u8 *data = input->data; in igc_write_flex_filter_ll()
3597 u8 *mask = input->mask; in igc_write_flex_filter_ll()
3607 if (input->length % 8 != 0) { in igc_write_flex_filter_ll()
3608 netdev_err(adapter->netdev, "The length of a flex filter has to be 8 byte aligned!\n"); in igc_write_flex_filter_ll()
3609 return -EINVAL; in igc_write_flex_filter_ll()
3625 queuing = input->length & IGC_FHFT_LENGTH_MASK; in igc_write_flex_filter_ll()
3626 queuing |= FIELD_PREP(IGC_FHFT_QUEUE_MASK, input->rx_queue); in igc_write_flex_filter_ll()
3627 queuing |= FIELD_PREP(IGC_FHFT_PRIO_MASK, input->prio); in igc_write_flex_filter_ll()
3629 if (input->immediate_irq) in igc_write_flex_filter_ll()
3632 if (input->drop) in igc_write_flex_filter_ll()
3666 if (input->index > 8) { in igc_write_flex_filter_ll()
3667 /* Filter 0-7 are enabled via WUFC. The other 24 filters are not. */ in igc_write_flex_filter_ll()
3670 wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8)); in igc_write_flex_filter_ll()
3674 wufc |= (IGC_WUFC_FLX0 << input->index); in igc_write_flex_filter_ll()
3678 netdev_dbg(adapter->netdev, "Added flex filter %u to HW.\n", in igc_write_flex_filter_ll()
3679 input->index); in igc_write_flex_filter_ll()
3691 memcpy(&flex->data[offset], src, len); in igc_flex_filter_add_field()
3700 flex->mask[idx / 8] |= BIT(idx % 8); in igc_flex_filter_add_field()
3705 flex->mask[idx / 8] |= BIT(idx % 8); in igc_flex_filter_add_field()
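A worked sketch of the mask bookkeeping above: every byte copied into the flex-filter pattern gets one bit set in a byte-granular mask, so byte index idx maps to bit (idx % 8) of mask[idx / 8]. Buffer sizes and the sample offset are illustrative.

#include <stdio.h>
#include <string.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned char data[32], mask[4];
	const unsigned char src[2] = { 0x81, 0x00 };	/* e.g. a VLAN TPID */
	unsigned int offset = 12, i;

	memset(data, 0, sizeof(data));
	memset(mask, 0, sizeof(mask));

	memcpy(&data[offset], src, sizeof(src));
	for (i = offset; i < offset + sizeof(src); i++)
		mask[i / 8] |= BIT(i % 8);	/* mark byte as significant */

	printf("mask[1] = 0x%02x\n", mask[1]);	/* bits 4 and 5 set */
	return 0;
}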
3711 struct igc_hw *hw = &adapter->hw; in igc_find_avail_flex_filter_slot()
3723 if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8)))) in igc_find_avail_flex_filter_slot()
3728 return -ENOSPC; in igc_find_avail_flex_filter_slot()
3733 struct igc_hw *hw = &adapter->hw; in igc_flex_filter_in_use()
3751 struct igc_nfc_filter *filter = &rule->filter; in igc_add_flex_filter()
3759 return -ENOSPC; in igc_add_flex_filter()
3762 * -> dest_mac [6] in igc_add_flex_filter()
3763 * -> src_mac [6] in igc_add_flex_filter()
3764 * -> tpid [2] in igc_add_flex_filter()
3765 * -> vlan tci [2] in igc_add_flex_filter()
3766 * -> ether type [2] in igc_add_flex_filter()
3767 * -> user data [8] in igc_add_flex_filter()
3768 * -> = 26 bytes => 32 length in igc_add_flex_filter()
3772 flex.rx_queue = rule->action; in igc_add_flex_filter()
3774 vlan = rule->filter.vlan_tci || rule->filter.vlan_etype; in igc_add_flex_filter()
3779 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) in igc_add_flex_filter()
3780 igc_flex_filter_add_field(&flex, &filter->dst_addr, 0, in igc_add_flex_filter()
3784 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) in igc_add_flex_filter()
3785 igc_flex_filter_add_field(&flex, &filter->src_addr, 6, in igc_add_flex_filter()
3789 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) { in igc_add_flex_filter()
3790 __be16 vlan_etype = cpu_to_be16(filter->vlan_etype); in igc_add_flex_filter()
3797 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) in igc_add_flex_filter()
3798 igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14, in igc_add_flex_filter()
3799 sizeof(filter->vlan_tci), NULL); in igc_add_flex_filter()
3802 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { in igc_add_flex_filter()
3803 __be16 etype = cpu_to_be16(filter->etype); in igc_add_flex_filter()
3810 if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) in igc_add_flex_filter()
3811 igc_flex_filter_add_field(&flex, &filter->user_data, in igc_add_flex_filter()
3813 sizeof(filter->user_data), in igc_add_flex_filter()
3814 filter->user_mask); in igc_add_flex_filter()
3821 filter->flex_index = index; in igc_add_flex_filter()
3829 struct igc_hw *hw = &adapter->hw; in igc_del_flex_filter()
3839 wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8)); in igc_del_flex_filter()
3862 if (rule->flex) { in igc_enable_nfc_rule()
3866 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { in igc_enable_nfc_rule()
3867 err = igc_add_etype_filter(adapter, rule->filter.etype, in igc_enable_nfc_rule()
3868 rule->action); in igc_enable_nfc_rule()
3873 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { in igc_enable_nfc_rule()
3875 rule->filter.src_addr, rule->action); in igc_enable_nfc_rule()
3880 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { in igc_enable_nfc_rule()
3882 rule->filter.dst_addr, rule->action); in igc_enable_nfc_rule()
3887 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { in igc_enable_nfc_rule()
3888 int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci); in igc_enable_nfc_rule()
3890 err = igc_add_vlan_prio_filter(adapter, prio, rule->action); in igc_enable_nfc_rule()
3901 if (rule->flex) { in igc_disable_nfc_rule()
3902 igc_del_flex_filter(adapter, rule->filter.flex_index); in igc_disable_nfc_rule()
3906 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) in igc_disable_nfc_rule()
3907 igc_del_etype_filter(adapter, rule->filter.etype); in igc_disable_nfc_rule()
3909 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { in igc_disable_nfc_rule()
3910 int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci); in igc_disable_nfc_rule()
3915 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) in igc_disable_nfc_rule()
3917 rule->filter.src_addr); in igc_disable_nfc_rule()
3919 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) in igc_disable_nfc_rule()
3921 rule->filter.dst_addr); in igc_disable_nfc_rule()
3925 * igc_get_nfc_rule() - Get NFC rule
3929 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3938 list_for_each_entry(rule, &adapter->nfc_rule_list, list) { in igc_get_nfc_rule()
3939 if (rule->location == location) in igc_get_nfc_rule()
3941 if (rule->location > location) in igc_get_nfc_rule()
3949 * igc_del_nfc_rule() - Delete NFC rule
3955 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3961 list_del(&rule->list); in igc_del_nfc_rule()
3962 adapter->nfc_rule_count--; in igc_del_nfc_rule()
3971 mutex_lock(&adapter->nfc_rule_lock); in igc_flush_nfc_rules()
3973 list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list) in igc_flush_nfc_rules()
3976 mutex_unlock(&adapter->nfc_rule_lock); in igc_flush_nfc_rules()
3980 * igc_add_nfc_rule() - Add NFC rule
3986 * Context: Expects adapter->nfc_rule_lock to be held by caller.
4000 list_for_each_entry(cur, &adapter->nfc_rule_list, list) { in igc_add_nfc_rule()
4001 if (cur->location >= rule->location) in igc_add_nfc_rule()
4006 list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list); in igc_add_nfc_rule()
4007 adapter->nfc_rule_count++; in igc_add_nfc_rule()
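/* Illustrative caller pattern (not from this file): per the "Context"
 * notes above, callers such as the ethtool entry points are expected to
 * wrap these helpers in the rule mutex, roughly:
 *
 *	mutex_lock(&adapter->nfc_rule_lock);
 *	rule = igc_get_nfc_rule(adapter, location);
 *	if (rule)
 *		igc_del_nfc_rule(adapter, rule);
 *	mutex_unlock(&adapter->nfc_rule_lock);
 */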
4015 mutex_lock(&adapter->nfc_rule_lock); in igc_restore_nfc_rules()
4017 list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list) in igc_restore_nfc_rules()
4020 mutex_unlock(&adapter->nfc_rule_lock); in igc_restore_nfc_rules()
4027 return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1); in igc_uc_sync()
4039 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
4045 * promiscuous mode, and all-multi behavior.
4050 struct igc_hw *hw = &adapter->hw; in igc_set_rx_mode()
4055 if (netdev->flags & IFF_PROMISC) { in igc_set_rx_mode()
4058 if (netdev->flags & IFF_ALLMULTI) { in igc_set_rx_mode()
4083 if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB) in igc_set_rx_mode()
4090 * igc_configure - configure the hardware for RX and TX
4095 struct net_device *netdev = adapter->netdev; in igc_configure()
4113 igc_rx_fifo_flush_base(&adapter->hw); in igc_configure()
4119 for (i = 0; i < adapter->num_rx_queues; i++) { in igc_configure()
4120 struct igc_ring *ring = adapter->rx_ring[i]; in igc_configure()
4122 if (ring->xsk_pool) in igc_configure()
4130 * igc_write_ivar - configure ivar for given MSI-X vector
4156 struct igc_adapter *adapter = q_vector->adapter; in igc_assign_vector()
4157 struct igc_hw *hw = &adapter->hw; in igc_assign_vector()
4161 if (q_vector->rx.ring) in igc_assign_vector()
4162 rx_queue = q_vector->rx.ring->reg_idx; in igc_assign_vector()
4163 if (q_vector->tx.ring) in igc_assign_vector()
4164 tx_queue = q_vector->tx.ring->reg_idx; in igc_assign_vector()
4166 switch (hw->mac.type) { in igc_assign_vector()
4176 q_vector->eims_value = BIT(msix_vector); in igc_assign_vector()
4179 WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n"); in igc_assign_vector()
4184 adapter->eims_enable_mask |= q_vector->eims_value; in igc_assign_vector()
4187 q_vector->set_itr = 1; in igc_assign_vector()
4191 * igc_configure_msix - Configure MSI-X hardware
4195 * generate MSI-X interrupts.
4199 struct igc_hw *hw = &adapter->hw; in igc_configure_msix()
4203 adapter->eims_enable_mask = 0; in igc_configure_msix()
4206 switch (hw->mac.type) { in igc_configure_msix()
4208 /* Turn on MSI-X capability first, or our settings in igc_configure_msix()
4216 adapter->eims_other = BIT(vector); in igc_configure_msix()
4222 /* do nothing, since nothing else supports MSI-X */ in igc_configure_msix()
4224 } /* switch (hw->mac.type) */ in igc_configure_msix()
4226 adapter->eims_enable_mask |= adapter->eims_other; in igc_configure_msix()
4228 for (i = 0; i < adapter->num_q_vectors; i++) in igc_configure_msix()
4229 igc_assign_vector(adapter->q_vector[i], vector++); in igc_configure_msix()
4235 * igc_irq_enable - Enable default interrupt generation settings
4240 struct igc_hw *hw = &adapter->hw; in igc_irq_enable()
4242 if (adapter->msix_entries) { in igc_irq_enable()
4246 wr32(IGC_EIAC, regval | adapter->eims_enable_mask); in igc_irq_enable()
4248 wr32(IGC_EIAM, regval | adapter->eims_enable_mask); in igc_irq_enable()
4249 wr32(IGC_EIMS, adapter->eims_enable_mask); in igc_irq_enable()
4258 * igc_irq_disable - Mask off interrupt generation on the NIC
4263 struct igc_hw *hw = &adapter->hw; in igc_irq_disable()
4265 if (adapter->msix_entries) { in igc_irq_disable()
4268 wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask); in igc_irq_disable()
4269 wr32(IGC_EIMC, adapter->eims_enable_mask); in igc_irq_disable()
4271 wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask); in igc_irq_disable()
4278 if (adapter->msix_entries) { in igc_irq_disable()
4281 synchronize_irq(adapter->msix_entries[vector++].vector); in igc_irq_disable()
4283 for (i = 0; i < adapter->num_q_vectors; i++) in igc_irq_disable()
4284 synchronize_irq(adapter->msix_entries[vector++].vector); in igc_irq_disable()
4286 synchronize_irq(adapter->pdev->irq); in igc_irq_disable()
4297 if (adapter->rss_queues > (max_rss_queues / 2)) in igc_set_flag_queue_pairs()
4298 adapter->flags |= IGC_FLAG_QUEUE_PAIRS; in igc_set_flag_queue_pairs()
4300 adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS; in igc_set_flag_queue_pairs()
4313 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); in igc_init_queue_configuration()
4319 * igc_reset_q_vector - Reset config for interrupt vector
4328 struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; in igc_reset_q_vector()
4336 if (q_vector->tx.ring) in igc_reset_q_vector()
4337 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; in igc_reset_q_vector()
4339 if (q_vector->rx.ring) in igc_reset_q_vector()
4340 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; in igc_reset_q_vector()
4342 netif_napi_del(&q_vector->napi); in igc_reset_q_vector()
4346 * igc_free_q_vector - Free memory allocated for specific interrupt vector
4354 struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; in igc_free_q_vector()
4356 adapter->q_vector[v_idx] = NULL; in igc_free_q_vector()
4366 * igc_free_q_vectors - Free memory allocated for interrupt vectors
4375 int v_idx = adapter->num_q_vectors; in igc_free_q_vectors()
4377 adapter->num_tx_queues = 0; in igc_free_q_vectors()
4378 adapter->num_rx_queues = 0; in igc_free_q_vectors()
4379 adapter->num_q_vectors = 0; in igc_free_q_vectors()
4381 while (v_idx--) { in igc_free_q_vectors()
4388 * igc_update_itr - update the dynamic ITR value based on statistics
4399 * NOTE: These calculations are only valid when operating in a single-queue environment. in igc_update_itr()
4405 unsigned int packets = ring_container->total_packets; in igc_update_itr()
4406 unsigned int bytes = ring_container->total_bytes; in igc_update_itr()
4407 u8 itrval = ring_container->itr; in igc_update_itr()
4447 ring_container->total_bytes = 0; in igc_update_itr()
4448 ring_container->total_packets = 0; in igc_update_itr()
4451 ring_container->itr = itrval; in igc_update_itr()
4456 struct igc_adapter *adapter = q_vector->adapter; in igc_set_itr()
4457 u32 new_itr = q_vector->itr_val; in igc_set_itr()
4460 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ in igc_set_itr()
4461 switch (adapter->link_speed) { in igc_set_itr()
4471 igc_update_itr(q_vector, &q_vector->tx); in igc_set_itr()
4472 igc_update_itr(q_vector, &q_vector->rx); in igc_set_itr()
4474 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); in igc_set_itr()
4478 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || in igc_set_itr()
4479 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) in igc_set_itr()
4498 if (new_itr != q_vector->itr_val) { in igc_set_itr()
4503 new_itr = new_itr > q_vector->itr_val ? in igc_set_itr()
4504 max((new_itr * q_vector->itr_val) / in igc_set_itr()
4505 (new_itr + (q_vector->itr_val >> 2)), in igc_set_itr()
4513 q_vector->itr_val = new_itr; in igc_set_itr()
4514 q_vector->set_itr = 1; in igc_set_itr()
4520 int v_idx = adapter->num_q_vectors; in igc_reset_interrupt_capability()
4522 if (adapter->msix_entries) { in igc_reset_interrupt_capability()
4523 pci_disable_msix(adapter->pdev); in igc_reset_interrupt_capability()
4524 kfree(adapter->msix_entries); in igc_reset_interrupt_capability()
4525 adapter->msix_entries = NULL; in igc_reset_interrupt_capability()
4526 } else if (adapter->flags & IGC_FLAG_HAS_MSI) { in igc_reset_interrupt_capability()
4527 pci_disable_msi(adapter->pdev); in igc_reset_interrupt_capability()
4530 while (v_idx--) in igc_reset_interrupt_capability()
4535 * igc_set_interrupt_capability - set MSI or MSI-X if supported
4537 * @msix: boolean value for MSI-X capability
4550 adapter->flags |= IGC_FLAG_HAS_MSIX; in igc_set_interrupt_capability()
4553 adapter->num_rx_queues = adapter->rss_queues; in igc_set_interrupt_capability()
4555 adapter->num_tx_queues = adapter->rss_queues; in igc_set_interrupt_capability()
4558 numvecs = adapter->num_rx_queues; in igc_set_interrupt_capability()
4561 if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) in igc_set_interrupt_capability()
4562 numvecs += adapter->num_tx_queues; in igc_set_interrupt_capability()
4565 adapter->num_q_vectors = numvecs; in igc_set_interrupt_capability()
4570 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), in igc_set_interrupt_capability()
4573 if (!adapter->msix_entries) in igc_set_interrupt_capability()
4578 adapter->msix_entries[i].entry = i; in igc_set_interrupt_capability()
4580 err = pci_enable_msix_range(adapter->pdev, in igc_set_interrupt_capability()
4581 adapter->msix_entries, in igc_set_interrupt_capability()
4587 kfree(adapter->msix_entries); in igc_set_interrupt_capability()
4588 adapter->msix_entries = NULL; in igc_set_interrupt_capability()
4593 adapter->flags &= ~IGC_FLAG_HAS_MSIX; in igc_set_interrupt_capability()
4595 adapter->rss_queues = 1; in igc_set_interrupt_capability()
4596 adapter->flags |= IGC_FLAG_QUEUE_PAIRS; in igc_set_interrupt_capability()
4597 adapter->num_rx_queues = 1; in igc_set_interrupt_capability()
4598 adapter->num_tx_queues = 1; in igc_set_interrupt_capability()
4599 adapter->num_q_vectors = 1; in igc_set_interrupt_capability()
4600 if (!pci_enable_msi(adapter->pdev)) in igc_set_interrupt_capability()
4601 adapter->flags |= IGC_FLAG_HAS_MSI; in igc_set_interrupt_capability()
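/* Worked example (illustrative): on a 4-core system
 * igc_init_queue_configuration() picks rss_queues = 4. That is more
 * than half of the supported RSS queues, so IGC_FLAG_QUEUE_PAIRS is set
 * and each q_vector services one Tx and one Rx ring, giving 4 queue
 * vectors plus the extra vector reserved for link/"other" interrupts,
 * i.e. an MSI-X request for 5 vectors. If pci_enable_msix_range()
 * fails, the fallback path above drops to a single queue pair driven by
 * MSI (or legacy INTx if even MSI cannot be enabled).
 */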
4605 * igc_update_ring_itr - update the dynamic ITR value based on packet size
4620 struct igc_adapter *adapter = q_vector->adapter; in igc_update_ring_itr()
4621 int new_val = q_vector->itr_val; in igc_update_ring_itr()
4625 /* For non-gigabit speeds, just fix the interrupt rate at 4000 in igc_update_ring_itr()
4626 * ints/sec - ITR timer value of 120 ticks. in igc_update_ring_itr()
4628 switch (adapter->link_speed) { in igc_update_ring_itr()
4637 packets = q_vector->rx.total_packets; in igc_update_ring_itr()
4639 avg_wire_size = q_vector->rx.total_bytes / packets; in igc_update_ring_itr()
4641 packets = q_vector->tx.total_packets; in igc_update_ring_itr()
4644 q_vector->tx.total_bytes / packets); in igc_update_ring_itr()
4656 /* Give a little boost to mid-size frames */ in igc_update_ring_itr()
4664 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || in igc_update_ring_itr()
4665 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) in igc_update_ring_itr()
4669 if (new_val != q_vector->itr_val) { in igc_update_ring_itr()
4670 q_vector->itr_val = new_val; in igc_update_ring_itr()
4671 q_vector->set_itr = 1; in igc_update_ring_itr()
4674 q_vector->rx.total_bytes = 0; in igc_update_ring_itr()
4675 q_vector->rx.total_packets = 0; in igc_update_ring_itr()
4676 q_vector->tx.total_bytes = 0; in igc_update_ring_itr()
4677 q_vector->tx.total_packets = 0; in igc_update_ring_itr()
4682 struct igc_adapter *adapter = q_vector->adapter; in igc_ring_irq_enable()
4683 struct igc_hw *hw = &adapter->hw; in igc_ring_irq_enable()
4685 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || in igc_ring_irq_enable()
4686 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { in igc_ring_irq_enable()
4687 if (adapter->num_q_vectors == 1) in igc_ring_irq_enable()
4693 if (!test_bit(__IGC_DOWN, &adapter->state)) { in igc_ring_irq_enable()
4694 if (adapter->msix_entries) in igc_ring_irq_enable()
4695 wr32(IGC_EIMS, q_vector->eims_value); in igc_ring_irq_enable()
4704 head->ring = ring; in igc_add_ring()
4705 head->count++; in igc_add_ring()
4709 * igc_cache_ring_register - Descriptor ring to register mapping
4712 * Once we know the feature-set enabled for the device, we'll cache
4719 switch (adapter->hw.mac.type) { in igc_cache_ring_register()
4722 for (; i < adapter->num_rx_queues; i++) in igc_cache_ring_register()
4723 adapter->rx_ring[i]->reg_idx = i; in igc_cache_ring_register()
4724 for (; j < adapter->num_tx_queues; j++) in igc_cache_ring_register()
4725 adapter->tx_ring[j]->reg_idx = j; in igc_cache_ring_register()
4731 * igc_poll - NAPI Rx polling callback
4740 struct igc_ring *rx_ring = q_vector->rx.ring; in igc_poll()
4744 if (q_vector->tx.ring) in igc_poll()
4748 int cleaned = rx_ring->xsk_pool ? in igc_poll()
4761 /* Exit the polling mode, but don't re-enable interrupts if stack might in igc_poll()
4762 * poll us due to busy-polling in igc_poll()
4767 return min(work_done, budget - 1); in igc_poll()
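/* For clarity: once napi_complete_done() has run, the poll routine must
 * report that it did not use up its whole budget, hence the clamp to
 * budget - 1. Returning the full budget would tell the NAPI core that
 * more work is pending and that interrupts were left disabled.
 */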
4771 * igc_alloc_q_vector - Allocate memory for a single interrupt vector
4780 * We allocate one q_vector. If allocation fails we return -ENOMEM.
4793 return -ENOMEM; in igc_alloc_q_vector()
4798 q_vector = adapter->q_vector[v_idx]; in igc_alloc_q_vector()
4805 return -ENOMEM; in igc_alloc_q_vector()
4808 netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll); in igc_alloc_q_vector()
4811 adapter->q_vector[v_idx] = q_vector; in igc_alloc_q_vector()
4812 q_vector->adapter = adapter; in igc_alloc_q_vector()
4815 q_vector->tx.work_limit = adapter->tx_work_limit; in igc_alloc_q_vector()
4818 q_vector->itr_register = adapter->io_addr + IGC_EITR(0); in igc_alloc_q_vector()
4819 q_vector->itr_val = IGC_START_ITR; in igc_alloc_q_vector()
4822 ring = q_vector->ring; in igc_alloc_q_vector()
4827 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) in igc_alloc_q_vector()
4828 q_vector->itr_val = adapter->rx_itr_setting; in igc_alloc_q_vector()
4831 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) in igc_alloc_q_vector()
4832 q_vector->itr_val = adapter->tx_itr_setting; in igc_alloc_q_vector()
4837 ring->dev = &adapter->pdev->dev; in igc_alloc_q_vector()
4838 ring->netdev = adapter->netdev; in igc_alloc_q_vector()
4841 ring->q_vector = q_vector; in igc_alloc_q_vector()
4844 igc_add_ring(ring, &q_vector->tx); in igc_alloc_q_vector()
4847 ring->count = adapter->tx_ring_count; in igc_alloc_q_vector()
4848 ring->queue_index = txr_idx; in igc_alloc_q_vector()
4851 adapter->tx_ring[txr_idx] = ring; in igc_alloc_q_vector()
4859 ring->dev = &adapter->pdev->dev; in igc_alloc_q_vector()
4860 ring->netdev = adapter->netdev; in igc_alloc_q_vector()
4863 ring->q_vector = q_vector; in igc_alloc_q_vector()
4866 igc_add_ring(ring, &q_vector->rx); in igc_alloc_q_vector()
4869 ring->count = adapter->rx_ring_count; in igc_alloc_q_vector()
4870 ring->queue_index = rxr_idx; in igc_alloc_q_vector()
4873 adapter->rx_ring[rxr_idx] = ring; in igc_alloc_q_vector()
4880 * igc_alloc_q_vectors - Allocate memory for interrupt vectors
4884 * return -ENOMEM.
4888 int rxr_remaining = adapter->num_rx_queues; in igc_alloc_q_vectors()
4889 int txr_remaining = adapter->num_tx_queues; in igc_alloc_q_vectors()
4891 int q_vectors = adapter->num_q_vectors; in igc_alloc_q_vectors()
4903 rxr_remaining--; in igc_alloc_q_vectors()
4909 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); in igc_alloc_q_vectors()
4910 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); in igc_alloc_q_vectors()
4919 rxr_remaining -= rqpv; in igc_alloc_q_vectors()
4920 txr_remaining -= tqpv; in igc_alloc_q_vectors()
4928 adapter->num_tx_queues = 0; in igc_alloc_q_vectors()
4929 adapter->num_rx_queues = 0; in igc_alloc_q_vectors()
4930 adapter->num_q_vectors = 0; in igc_alloc_q_vectors()
4932 while (v_idx--) in igc_alloc_q_vectors()
4935 return -ENOMEM; in igc_alloc_q_vectors()
4939 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
4941 * @msix: boolean for MSI-X capability
4947 struct net_device *dev = adapter->netdev; in igc_init_interrupt_scheme()
4968 * igc_sw_init - Initialize general software structures (struct igc_adapter)
4977 struct net_device *netdev = adapter->netdev; in igc_sw_init()
4978 struct pci_dev *pdev = adapter->pdev; in igc_sw_init()
4979 struct igc_hw *hw = &adapter->hw; in igc_sw_init()
4981 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); in igc_sw_init()
4984 adapter->tx_ring_count = IGC_DEFAULT_TXD; in igc_sw_init()
4985 adapter->rx_ring_count = IGC_DEFAULT_RXD; in igc_sw_init()
4988 adapter->rx_itr_setting = IGC_DEFAULT_ITR; in igc_sw_init()
4989 adapter->tx_itr_setting = IGC_DEFAULT_ITR; in igc_sw_init()
4992 adapter->tx_work_limit = IGC_DEFAULT_TX_WORK; in igc_sw_init()
4995 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + in igc_sw_init()
4997 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; in igc_sw_init()
4999 mutex_init(&adapter->nfc_rule_lock); in igc_sw_init()
5000 INIT_LIST_HEAD(&adapter->nfc_rule_list); in igc_sw_init()
5001 adapter->nfc_rule_count = 0; in igc_sw_init()
5003 spin_lock_init(&adapter->stats64_lock); in igc_sw_init()
5004 spin_lock_init(&adapter->qbv_tx_lock); in igc_sw_init()
5005 /* Assume MSI-X interrupts, will be checked during IRQ allocation */ in igc_sw_init()
5006 adapter->flags |= IGC_FLAG_HAS_MSIX; in igc_sw_init()
5013 return -ENOMEM; in igc_sw_init()
5019 set_bit(__IGC_DOWN, &adapter->state); in igc_sw_init()
5027 struct igc_q_vector *q_vector = adapter->q_vector[vector]; in igc_set_queue_napi()
5029 if (q_vector->rx.ring) in igc_set_queue_napi()
5030 netif_queue_set_napi(adapter->netdev, in igc_set_queue_napi()
5031 q_vector->rx.ring->queue_index, in igc_set_queue_napi()
5034 if (q_vector->tx.ring) in igc_set_queue_napi()
5035 netif_queue_set_napi(adapter->netdev, in igc_set_queue_napi()
5036 q_vector->tx.ring->queue_index, in igc_set_queue_napi()
5041 * igc_up - Open the interface and prepare it to handle traffic
5046 struct igc_hw *hw = &adapter->hw; in igc_up()
5053 clear_bit(__IGC_DOWN, &adapter->state); in igc_up()
5055 for (i = 0; i < adapter->num_q_vectors; i++) { in igc_up()
5056 napi = &adapter->q_vector[i]->napi; in igc_up()
5061 if (adapter->msix_entries) in igc_up()
5064 igc_assign_vector(adapter->q_vector[0], 0); in igc_up()
5070 netif_tx_start_all_queues(adapter->netdev); in igc_up()
5073 hw->mac.get_link_status = true; in igc_up()
5074 schedule_work(&adapter->watchdog_task); in igc_up()
5078 * igc_update_stats - Update the board statistics counters
5083 struct rtnl_link_stats64 *net_stats = &adapter->stats64; in igc_update_stats()
5084 struct pci_dev *pdev = adapter->pdev; in igc_update_stats()
5085 struct igc_hw *hw = &adapter->hw; in igc_update_stats()
5092 /* Prevent stats update while adapter is being reset, or if the pci in igc_update_stats()
5095 if (adapter->link_speed == 0) in igc_update_stats()
5104 for (i = 0; i < adapter->num_rx_queues; i++) { in igc_update_stats()
5105 struct igc_ring *ring = adapter->rx_ring[i]; in igc_update_stats()
5108 if (hw->mac.type >= igc_i225) in igc_update_stats()
5112 ring->rx_stats.drops += rqdpc; in igc_update_stats()
5113 net_stats->rx_fifo_errors += rqdpc; in igc_update_stats()
5117 start = u64_stats_fetch_begin(&ring->rx_syncp); in igc_update_stats()
5118 _bytes = ring->rx_stats.bytes; in igc_update_stats()
5119 _packets = ring->rx_stats.packets; in igc_update_stats()
5120 } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); in igc_update_stats()
5125 net_stats->rx_bytes = bytes; in igc_update_stats()
5126 net_stats->rx_packets = packets; in igc_update_stats()
5130 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_update_stats()
5131 struct igc_ring *ring = adapter->tx_ring[i]; in igc_update_stats()
5134 start = u64_stats_fetch_begin(&ring->tx_syncp); in igc_update_stats()
5135 _bytes = ring->tx_stats.bytes; in igc_update_stats()
5136 _packets = ring->tx_stats.packets; in igc_update_stats()
5137 } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); in igc_update_stats()
5141 net_stats->tx_bytes = bytes; in igc_update_stats()
5142 net_stats->tx_packets = packets; in igc_update_stats()
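/* For clarity: the u64_stats_fetch_begin()/u64_stats_fetch_retry()
 * pairs above re-read the per-ring counters if a writer bumped the
 * syncp sequence in the meantime, which keeps the 64-bit byte/packet
 * counts consistent on 32-bit hosts; on 64-bit builds these helpers are
 * essentially no-ops.
 */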
5145 /* read stats registers */ in igc_update_stats()
5146 adapter->stats.crcerrs += rd32(IGC_CRCERRS); in igc_update_stats()
5147 adapter->stats.gprc += rd32(IGC_GPRC); in igc_update_stats()
5148 adapter->stats.gorc += rd32(IGC_GORCL); in igc_update_stats()
5150 adapter->stats.bprc += rd32(IGC_BPRC); in igc_update_stats()
5151 adapter->stats.mprc += rd32(IGC_MPRC); in igc_update_stats()
5152 adapter->stats.roc += rd32(IGC_ROC); in igc_update_stats()
5154 adapter->stats.prc64 += rd32(IGC_PRC64); in igc_update_stats()
5155 adapter->stats.prc127 += rd32(IGC_PRC127); in igc_update_stats()
5156 adapter->stats.prc255 += rd32(IGC_PRC255); in igc_update_stats()
5157 adapter->stats.prc511 += rd32(IGC_PRC511); in igc_update_stats()
5158 adapter->stats.prc1023 += rd32(IGC_PRC1023); in igc_update_stats()
5159 adapter->stats.prc1522 += rd32(IGC_PRC1522); in igc_update_stats()
5160 adapter->stats.tlpic += rd32(IGC_TLPIC); in igc_update_stats()
5161 adapter->stats.rlpic += rd32(IGC_RLPIC); in igc_update_stats()
5162 adapter->stats.hgptc += rd32(IGC_HGPTC); in igc_update_stats()
5165 adapter->stats.mpc += mpc; in igc_update_stats()
5166 net_stats->rx_fifo_errors += mpc; in igc_update_stats()
5167 adapter->stats.scc += rd32(IGC_SCC); in igc_update_stats()
5168 adapter->stats.ecol += rd32(IGC_ECOL); in igc_update_stats()
5169 adapter->stats.mcc += rd32(IGC_MCC); in igc_update_stats()
5170 adapter->stats.latecol += rd32(IGC_LATECOL); in igc_update_stats()
5171 adapter->stats.dc += rd32(IGC_DC); in igc_update_stats()
5172 adapter->stats.rlec += rd32(IGC_RLEC); in igc_update_stats()
5173 adapter->stats.xonrxc += rd32(IGC_XONRXC); in igc_update_stats()
5174 adapter->stats.xontxc += rd32(IGC_XONTXC); in igc_update_stats()
5175 adapter->stats.xoffrxc += rd32(IGC_XOFFRXC); in igc_update_stats()
5176 adapter->stats.xofftxc += rd32(IGC_XOFFTXC); in igc_update_stats()
5177 adapter->stats.fcruc += rd32(IGC_FCRUC); in igc_update_stats()
5178 adapter->stats.gptc += rd32(IGC_GPTC); in igc_update_stats()
5179 adapter->stats.gotc += rd32(IGC_GOTCL); in igc_update_stats()
5181 adapter->stats.rnbc += rd32(IGC_RNBC); in igc_update_stats()
5182 adapter->stats.ruc += rd32(IGC_RUC); in igc_update_stats()
5183 adapter->stats.rfc += rd32(IGC_RFC); in igc_update_stats()
5184 adapter->stats.rjc += rd32(IGC_RJC); in igc_update_stats()
5185 adapter->stats.tor += rd32(IGC_TORH); in igc_update_stats()
5186 adapter->stats.tot += rd32(IGC_TOTH); in igc_update_stats()
5187 adapter->stats.tpr += rd32(IGC_TPR); in igc_update_stats()
5189 adapter->stats.ptc64 += rd32(IGC_PTC64); in igc_update_stats()
5190 adapter->stats.ptc127 += rd32(IGC_PTC127); in igc_update_stats()
5191 adapter->stats.ptc255 += rd32(IGC_PTC255); in igc_update_stats()
5192 adapter->stats.ptc511 += rd32(IGC_PTC511); in igc_update_stats()
5193 adapter->stats.ptc1023 += rd32(IGC_PTC1023); in igc_update_stats()
5194 adapter->stats.ptc1522 += rd32(IGC_PTC1522); in igc_update_stats()
5196 adapter->stats.mptc += rd32(IGC_MPTC); in igc_update_stats()
5197 adapter->stats.bptc += rd32(IGC_BPTC); in igc_update_stats()
5199 adapter->stats.tpt += rd32(IGC_TPT); in igc_update_stats()
5200 adapter->stats.colc += rd32(IGC_COLC); in igc_update_stats()
5201 adapter->stats.colc += rd32(IGC_RERC); in igc_update_stats()
5203 adapter->stats.algnerrc += rd32(IGC_ALGNERRC); in igc_update_stats()
5205 adapter->stats.tsctc += rd32(IGC_TSCTC); in igc_update_stats()
5207 adapter->stats.iac += rd32(IGC_IAC); in igc_update_stats()
5210 net_stats->multicast = adapter->stats.mprc; in igc_update_stats()
5211 net_stats->collisions = adapter->stats.colc; in igc_update_stats()
5218 net_stats->rx_errors = adapter->stats.rxerrc + in igc_update_stats()
5219 adapter->stats.crcerrs + adapter->stats.algnerrc + in igc_update_stats()
5220 adapter->stats.ruc + adapter->stats.roc + in igc_update_stats()
5221 adapter->stats.cexterr; in igc_update_stats()
5222 net_stats->rx_length_errors = adapter->stats.ruc + in igc_update_stats()
5223 adapter->stats.roc; in igc_update_stats()
5224 net_stats->rx_crc_errors = adapter->stats.crcerrs; in igc_update_stats()
5225 net_stats->rx_frame_errors = adapter->stats.algnerrc; in igc_update_stats()
5226 net_stats->rx_missed_errors = adapter->stats.mpc; in igc_update_stats()
5229 net_stats->tx_errors = adapter->stats.ecol + in igc_update_stats()
5230 adapter->stats.latecol; in igc_update_stats()
5231 net_stats->tx_aborted_errors = adapter->stats.ecol; in igc_update_stats()
5232 net_stats->tx_window_errors = adapter->stats.latecol; in igc_update_stats()
5233 net_stats->tx_carrier_errors = adapter->stats.tncrs; in igc_update_stats()
5236 net_stats->tx_dropped = adapter->stats.txdrop; in igc_update_stats()
5238 /* Management Stats */ in igc_update_stats()
5239 adapter->stats.mgptc += rd32(IGC_MGTPTC); in igc_update_stats()
5240 adapter->stats.mgprc += rd32(IGC_MGTPRC); in igc_update_stats()
5241 adapter->stats.mgpdc += rd32(IGC_MGTPDC); in igc_update_stats()
5245 * igc_down - Close the interface
5250 struct net_device *netdev = adapter->netdev; in igc_down()
5251 struct igc_hw *hw = &adapter->hw; in igc_down()
5255 set_bit(__IGC_DOWN, &adapter->state); in igc_down()
5259 if (pci_device_is_present(adapter->pdev)) { in igc_down()
5271 if (pci_device_is_present(adapter->pdev)) { in igc_down()
5283 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; in igc_down()
5285 for (i = 0; i < adapter->num_q_vectors; i++) { in igc_down()
5286 if (adapter->q_vector[i]) { in igc_down()
5287 napi_synchronize(&adapter->q_vector[i]->napi); in igc_down()
5289 napi_disable(&adapter->q_vector[i]->napi); in igc_down()
5293 del_timer_sync(&adapter->watchdog_timer); in igc_down()
5294 del_timer_sync(&adapter->phy_info_timer); in igc_down()
5296 /* record the stats before reset */ in igc_down()
5297 spin_lock(&adapter->stats64_lock); in igc_down()
5299 spin_unlock(&adapter->stats64_lock); in igc_down()
5301 adapter->link_speed = 0; in igc_down()
5302 adapter->link_duplex = 0; in igc_down()
5304 if (!pci_channel_offline(adapter->pdev)) in igc_down()
5308 adapter->flags &= ~IGC_FLAG_VLAN_PROMISC; in igc_down()
5317 while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) in igc_reinit_locked()
5321 clear_bit(__IGC_RESETTING, &adapter->state); in igc_reinit_locked()
5332 if (test_bit(__IGC_DOWN, &adapter->state) || in igc_reset_task()
5333 test_bit(__IGC_RESETTING, &adapter->state)) { in igc_reset_task()
5340 netdev_err(adapter->netdev, "Reset adapter\n"); in igc_reset_task()
5346 * igc_change_mtu - Change the Maximum Transfer Unit
5359 return -EINVAL; in igc_change_mtu()
5366 while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) in igc_change_mtu()
5370 adapter->max_frame_size = max_frame; in igc_change_mtu()
5375 netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); in igc_change_mtu()
5376 WRITE_ONCE(netdev->mtu, new_mtu); in igc_change_mtu()
5383 clear_bit(__IGC_RESETTING, &adapter->state); in igc_change_mtu()
5389 * igc_tx_timeout - Respond to a Tx Hang
5397 struct igc_hw *hw = &adapter->hw; in igc_tx_timeout()
5400 adapter->tx_timeout_count++; in igc_tx_timeout()
5401 schedule_work(&adapter->reset_task); in igc_tx_timeout()
5403 (adapter->eims_enable_mask & ~adapter->eims_other)); in igc_tx_timeout()
5407 * igc_get_stats64 - Get System Network Statistics
5409 * @stats: rtnl_link_stats64 pointer
5415 struct rtnl_link_stats64 *stats) in igc_get_stats64() argument
5419 spin_lock(&adapter->stats64_lock); in igc_get_stats64()
5420 if (!test_bit(__IGC_RESETTING, &adapter->state)) in igc_get_stats64()
5422 memcpy(stats, &adapter->stats64, sizeof(*stats)); in igc_get_stats64()
5423 spin_unlock(&adapter->stats64_lock); in igc_get_stats64()
5443 netdev_features_t changed = netdev->features ^ features; in igc_set_features()
5456 netdev->features = features; in igc_set_features()
5481 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); in igc_features_check()
5491 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) in igc_features_check()
5499 struct igc_hw *hw = &adapter->hw; in igc_tsync_interrupt()
5508 if (adapter->ptp_caps.pps) in igc_tsync_interrupt()
5509 ptp_clock_event(adapter->ptp_clock, &event); in igc_tsync_interrupt()
5518 spin_lock(&adapter->tmreg_lock); in igc_tsync_interrupt()
5519 ts = timespec64_add(adapter->perout[0].start, in igc_tsync_interrupt()
5520 adapter->perout[0].period); in igc_tsync_interrupt()
5526 adapter->perout[0].start = ts; in igc_tsync_interrupt()
5527 spin_unlock(&adapter->tmreg_lock); in igc_tsync_interrupt()
5531 spin_lock(&adapter->tmreg_lock); in igc_tsync_interrupt()
5532 ts = timespec64_add(adapter->perout[1].start, in igc_tsync_interrupt()
5533 adapter->perout[1].period); in igc_tsync_interrupt()
5539 adapter->perout[1].start = ts; in igc_tsync_interrupt()
5540 spin_unlock(&adapter->tmreg_lock); in igc_tsync_interrupt()
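/* For clarity: each target-time interrupt re-arms the periodic output
 * by advancing the stored start time by one period, so the programmed
 * periodic output signal keeps toggling cycle after cycle without
 * further help from the PTP core.
 */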
5549 ptp_clock_event(adapter->ptp_clock, &event); in igc_tsync_interrupt()
5558 ptp_clock_event(adapter->ptp_clock, &event); in igc_tsync_interrupt()
5563 * igc_msix_other - msix other interrupt handler
5570 struct igc_hw *hw = &adapter->hw; in igc_msix_other()
5575 schedule_work(&adapter->reset_task); in igc_msix_other()
5579 adapter->stats.doosync++; in igc_msix_other()
5583 hw->mac.get_link_status = true; in igc_msix_other()
5585 if (!test_bit(__IGC_DOWN, &adapter->state)) in igc_msix_other()
5586 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igc_msix_other()
5592 wr32(IGC_EIMS, adapter->eims_other); in igc_msix_other()
5599 u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK; in igc_write_itr()
5601 if (!q_vector->set_itr) in igc_write_itr()
5609 writel(itr_val, q_vector->itr_register); in igc_write_itr()
5610 q_vector->set_itr = 0; in igc_write_itr()
5620 napi_schedule(&q_vector->napi); in igc_msix_ring()
5626 * igc_request_msix - Initialize MSI-X interrupts
5629 * igc_request_msix allocates MSI-X vectors and requests interrupts from the
5634 unsigned int num_q_vectors = adapter->num_q_vectors; in igc_request_msix()
5636 struct net_device *netdev = adapter->netdev; in igc_request_msix()
5638 err = request_irq(adapter->msix_entries[vector].vector, in igc_request_msix()
5639 &igc_msix_other, 0, netdev->name, adapter); in igc_request_msix()
5645 dev_warn(&adapter->pdev->dev, in igc_request_msix()
5647 adapter->num_q_vectors, MAX_Q_VECTORS); in igc_request_msix()
5650 struct igc_q_vector *q_vector = adapter->q_vector[i]; in igc_request_msix()
5654 q_vector->itr_register = adapter->io_addr + IGC_EITR(vector); in igc_request_msix()
5656 if (q_vector->rx.ring && q_vector->tx.ring) in igc_request_msix()
5657 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, in igc_request_msix()
5658 q_vector->rx.ring->queue_index); in igc_request_msix()
5659 else if (q_vector->tx.ring) in igc_request_msix()
5660 sprintf(q_vector->name, "%s-tx-%u", netdev->name, in igc_request_msix()
5661 q_vector->tx.ring->queue_index); in igc_request_msix()
5662 else if (q_vector->rx.ring) in igc_request_msix()
5663 sprintf(q_vector->name, "%s-rx-%u", netdev->name, in igc_request_msix()
5664 q_vector->rx.ring->queue_index); in igc_request_msix()
5666 sprintf(q_vector->name, "%s-unused", netdev->name); in igc_request_msix()
5668 err = request_irq(adapter->msix_entries[vector].vector, in igc_request_msix()
5669 igc_msix_ring, 0, q_vector->name, in igc_request_msix()
5674 netif_napi_set_irq(&q_vector->napi, in igc_request_msix()
5675 adapter->msix_entries[vector].vector); in igc_request_msix()
5683 free_irq(adapter->msix_entries[free_vector++].vector, adapter); in igc_request_msix()
5685 vector--; in igc_request_msix()
5687 free_irq(adapter->msix_entries[free_vector++].vector, in igc_request_msix()
5688 adapter->q_vector[i]); in igc_request_msix()
5695 * igc_clear_interrupt_scheme - reset the device to a state of no interrupts
5699 * MSI-X interrupts allocated.
5714 igc_get_phy_info(&adapter->hw); in igc_update_phy_info()
5718 * igc_has_link - check shared code for link and determine up/down
5723 struct igc_hw *hw = &adapter->hw; in igc_has_link()
5731 if (!hw->mac.get_link_status) in igc_has_link()
5733 hw->mac.ops.check_for_link(hw); in igc_has_link()
5734 link_active = !hw->mac.get_link_status; in igc_has_link()
5736 if (hw->mac.type == igc_i225) { in igc_has_link()
5737 if (!netif_carrier_ok(adapter->netdev)) { in igc_has_link()
5738 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; in igc_has_link()
5739 } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { in igc_has_link()
5740 adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE; in igc_has_link()
5741 adapter->link_check_timeout = jiffies; in igc_has_link()
5749 * igc_watchdog - Timer Call-back
5756 schedule_work(&adapter->watchdog_task); in igc_watchdog()
5764 struct net_device *netdev = adapter->netdev; in igc_watchdog_task()
5765 struct igc_hw *hw = &adapter->hw; in igc_watchdog_task()
5766 struct igc_phy_info *phy = &hw->phy; in igc_watchdog_task()
5773 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) { in igc_watchdog_task()
5774 if (time_after(jiffies, (adapter->link_check_timeout + HZ))) in igc_watchdog_task()
5775 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; in igc_watchdog_task()
5782 pm_runtime_resume(netdev->dev.parent); in igc_watchdog_task()
5787 hw->mac.ops.get_speed_and_duplex(hw, in igc_watchdog_task()
5788 &adapter->link_speed, in igc_watchdog_task()
5789 &adapter->link_duplex); in igc_watchdog_task()
5795 adapter->link_speed, in igc_watchdog_task()
5796 adapter->link_duplex == FULL_DUPLEX ? in igc_watchdog_task()
5804 if ((adapter->flags & IGC_FLAG_EEE) && in igc_watchdog_task()
5805 adapter->link_duplex == HALF_DUPLEX) { in igc_watchdog_task()
5807 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n"); in igc_watchdog_task()
5808 adapter->hw.dev_spec._base.eee_enable = false; in igc_watchdog_task()
5809 adapter->flags &= ~IGC_FLAG_EEE; in igc_watchdog_task()
5814 if (phy->speed_downgraded) in igc_watchdog_task()
5818 adapter->tx_timeout_factor = 1; in igc_watchdog_task()
5819 switch (adapter->link_speed) { in igc_watchdog_task()
5821 adapter->tx_timeout_factor = 14; in igc_watchdog_task()
5826 adapter->tx_timeout_factor = 1; in igc_watchdog_task()
5832 * based on link-up activity. Write into the register in igc_watchdog_task()
5837 if (adapter->link_speed != SPEED_1000) in igc_watchdog_task()
5847 retry_count--; in igc_watchdog_task()
5853 netdev_err(netdev, "read 1000Base-T Status Reg\n"); in igc_watchdog_task()
5859 if (!test_bit(__IGC_DOWN, &adapter->state)) in igc_watchdog_task()
5860 mod_timer(&adapter->phy_info_timer, in igc_watchdog_task()
5865 adapter->link_speed = 0; in igc_watchdog_task()
5866 adapter->link_duplex = 0; in igc_watchdog_task()
5873 if (!test_bit(__IGC_DOWN, &adapter->state)) in igc_watchdog_task()
5874 mod_timer(&adapter->phy_info_timer, in igc_watchdog_task()
5877 pm_schedule_suspend(netdev->dev.parent, in igc_watchdog_task()
5882 spin_lock(&adapter->stats64_lock); in igc_watchdog_task()
5884 spin_unlock(&adapter->stats64_lock); in igc_watchdog_task()
5886 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_watchdog_task()
5887 struct igc_ring *tx_ring = adapter->tx_ring[i]; in igc_watchdog_task()
5895 if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) { in igc_watchdog_task()
5896 adapter->tx_timeout_count++; in igc_watchdog_task()
5897 schedule_work(&adapter->reset_task); in igc_watchdog_task()
5904 set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); in igc_watchdog_task()
5908 if (adapter->flags & IGC_FLAG_HAS_MSIX) { in igc_watchdog_task()
5911 for (i = 0; i < adapter->num_q_vectors; i++) { in igc_watchdog_task()
5912 struct igc_q_vector *q_vector = adapter->q_vector[i]; in igc_watchdog_task()
5915 if (!q_vector->rx.ring) in igc_watchdog_task()
5918 rx_ring = adapter->rx_ring[q_vector->rx.ring->queue_index]; in igc_watchdog_task()
5920 if (test_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) { in igc_watchdog_task()
5921 eics |= q_vector->eims_value; in igc_watchdog_task()
5922 clear_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); in igc_watchdog_task()
5928 struct igc_ring *rx_ring = adapter->rx_ring[0]; in igc_watchdog_task()
5930 if (test_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) { in igc_watchdog_task()
5931 clear_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); in igc_watchdog_task()
5939 if (!test_bit(__IGC_DOWN, &adapter->state)) { in igc_watchdog_task()
5940 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) in igc_watchdog_task()
5941 mod_timer(&adapter->watchdog_timer, in igc_watchdog_task()
5944 mod_timer(&adapter->watchdog_timer, in igc_watchdog_task()
5950 * igc_intr_msi - Interrupt Handler
5957 struct igc_q_vector *q_vector = adapter->q_vector[0]; in igc_intr_msi()
5958 struct igc_hw *hw = &adapter->hw; in igc_intr_msi()
5965 schedule_work(&adapter->reset_task); in igc_intr_msi()
5969 adapter->stats.doosync++; in igc_intr_msi()
5973 hw->mac.get_link_status = true; in igc_intr_msi()
5974 if (!test_bit(__IGC_DOWN, &adapter->state)) in igc_intr_msi()
5975 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igc_intr_msi()
5981 napi_schedule(&q_vector->napi); in igc_intr_msi()
5987 * igc_intr - Legacy Interrupt Handler
5994 struct igc_q_vector *q_vector = adapter->q_vector[0]; in igc_intr()
5995 struct igc_hw *hw = &adapter->hw; in igc_intr()
5996 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No in igc_intr()
6001 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is in igc_intr()
6010 schedule_work(&adapter->reset_task); in igc_intr()
6014 adapter->stats.doosync++; in igc_intr()
6018 hw->mac.get_link_status = true; in igc_intr()
6020 if (!test_bit(__IGC_DOWN, &adapter->state)) in igc_intr()
6021 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igc_intr()
6027 napi_schedule(&q_vector->napi); in igc_intr()
6034 if (adapter->msix_entries) { in igc_free_irq()
6037 free_irq(adapter->msix_entries[vector++].vector, adapter); in igc_free_irq()
6039 for (i = 0; i < adapter->num_q_vectors; i++) in igc_free_irq()
6040 free_irq(adapter->msix_entries[vector++].vector, in igc_free_irq()
6041 adapter->q_vector[i]); in igc_free_irq()
6043 free_irq(adapter->pdev->irq, adapter); in igc_free_irq()
6048 * igc_request_irq - initialize interrupts
6056 struct net_device *netdev = adapter->netdev; in igc_request_irq()
6057 struct pci_dev *pdev = adapter->pdev; in igc_request_irq()
6060 if (adapter->flags & IGC_FLAG_HAS_MSIX) { in igc_request_irq()
6077 igc_assign_vector(adapter->q_vector[0], 0); in igc_request_irq()
6079 if (adapter->flags & IGC_FLAG_HAS_MSI) { in igc_request_irq()
6080 err = request_irq(pdev->irq, &igc_intr_msi, 0, in igc_request_irq()
6081 netdev->name, adapter); in igc_request_irq()
6087 adapter->flags &= ~IGC_FLAG_HAS_MSI; in igc_request_irq()
6090 err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED, in igc_request_irq()
6091 netdev->name, adapter); in igc_request_irq()
6101 * __igc_open - Called when a network interface is made active
6116 struct pci_dev *pdev = adapter->pdev; in __igc_open()
6117 struct igc_hw *hw = &adapter->hw; in __igc_open()
6124 if (test_bit(__IGC_TESTING, &adapter->state)) { in __igc_open()
6126 return -EBUSY; in __igc_open()
6130 pm_runtime_get_sync(&pdev->dev); in __igc_open()
6152 clear_bit(__IGC_DOWN, &adapter->state); in __igc_open()
6154 for (i = 0; i < adapter->num_q_vectors; i++) { in __igc_open()
6155 napi = &adapter->q_vector[i]->napi; in __igc_open()
6165 pm_runtime_put(&pdev->dev); in __igc_open()
6170 hw->mac.get_link_status = true; in __igc_open()
6171 schedule_work(&adapter->watchdog_task); in __igc_open()
6177 igc_power_down_phy_copper_base(&adapter->hw); in __igc_open()
6184 pm_runtime_put(&pdev->dev); in __igc_open()
6195 err = netif_set_real_num_queues(netdev, adapter->num_tx_queues, in igc_open()
6196 adapter->num_rx_queues); in igc_open()
6206 * __igc_close - Disables a network interface
6212 * The close entry point is called when an interface is de-activated
6220 struct pci_dev *pdev = adapter->pdev; in __igc_close()
6222 WARN_ON(test_bit(__IGC_RESETTING, &adapter->state)); in __igc_close()
6225 pm_runtime_get_sync(&pdev->dev); in __igc_close()
6237 pm_runtime_put_sync(&pdev->dev); in __igc_close()
6244 if (netif_device_present(netdev) || netdev->dismantle) in igc_close()
6250 * igc_ioctl - Access the hwtstamp interface
6263 return -EOPNOTSUPP; in igc_ioctl()
6272 if (queue < 0 || queue >= adapter->num_tx_queues) in igc_save_launchtime_params()
6273 return -EINVAL; in igc_save_launchtime_params()
6275 ring = adapter->tx_ring[queue]; in igc_save_launchtime_params()
6276 ring->launchtime_enable = enable; in igc_save_launchtime_params()
6294 struct igc_hw *hw = &adapter->hw; in validate_schedule()
6298 if (qopt->cycle_time_extension) in validate_schedule()
6309 if (!is_base_time_past(qopt->base_time, &now) && in validate_schedule()
6313 for (n = 0; n < qopt->num_entries; n++) { in validate_schedule()
6317 prev = n ? &qopt->entries[n - 1] : NULL; in validate_schedule()
6318 e = &qopt->entries[n]; in validate_schedule()
6323 if (e->command != TC_TAPRIO_CMD_SET_GATES) in validate_schedule()
6326 for (i = 0; i < adapter->num_tx_queues; i++) in validate_schedule()
6327 if (e->gate_mask & BIT(i)) { in validate_schedule()
6335 !(prev->gate_mask & BIT(i))) in validate_schedule()
6346 struct igc_hw *hw = &adapter->hw; in igc_tsn_enable_launchtime()
6349 if (hw->mac.type != igc_i225) in igc_tsn_enable_launchtime()
6350 return -EOPNOTSUPP; in igc_tsn_enable_launchtime()
6352 err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable); in igc_tsn_enable_launchtime()
6364 adapter->base_time = 0; in igc_qbv_clear_schedule()
6365 adapter->cycle_time = NSEC_PER_SEC; in igc_qbv_clear_schedule()
6366 adapter->taprio_offload_enable = false; in igc_qbv_clear_schedule()
6367 adapter->qbv_config_change_errors = 0; in igc_qbv_clear_schedule()
6368 adapter->qbv_count = 0; in igc_qbv_clear_schedule()
6370 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_qbv_clear_schedule()
6371 struct igc_ring *ring = adapter->tx_ring[i]; in igc_qbv_clear_schedule()
6373 ring->start_time = 0; in igc_qbv_clear_schedule()
6374 ring->end_time = NSEC_PER_SEC; in igc_qbv_clear_schedule()
6375 ring->max_sdu = 0; in igc_qbv_clear_schedule()
6378 spin_lock_irqsave(&adapter->qbv_tx_lock, flags); in igc_qbv_clear_schedule()
6380 adapter->qbv_transition = false; in igc_qbv_clear_schedule()
6382 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_qbv_clear_schedule()
6383 struct igc_ring *ring = adapter->tx_ring[i]; in igc_qbv_clear_schedule()
6385 ring->oper_gate_closed = false; in igc_qbv_clear_schedule()
6386 ring->admin_gate_closed = false; in igc_qbv_clear_schedule()
6389 spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); in igc_qbv_clear_schedule()
6402 struct tc_taprio_qopt_stats *stats) in igc_taprio_stats() argument
6407 stats->tx_overruns = 0; in igc_taprio_stats()
6413 struct tc_taprio_qopt_stats *stats = &queue_stats->stats; in igc_taprio_queue_stats() local
6418 stats->tx_overruns = 0; in igc_taprio_queue_stats()
6425 struct igc_hw *hw = &adapter->hw; in igc_save_qbv_schedule()
6432 if (qopt->base_time < 0) in igc_save_qbv_schedule()
6433 return -ERANGE; in igc_save_qbv_schedule()
6435 if (igc_is_device_id_i225(hw) && adapter->taprio_offload_enable) in igc_save_qbv_schedule()
6436 return -EALREADY; in igc_save_qbv_schedule()
6439 return -EINVAL; in igc_save_qbv_schedule()
6444 is_base_time_past(qopt->base_time, &now)) in igc_save_qbv_schedule()
6445 adapter->qbv_config_change_errors++; in igc_save_qbv_schedule()
6447 adapter->cycle_time = qopt->cycle_time; in igc_save_qbv_schedule()
6448 adapter->base_time = qopt->base_time; in igc_save_qbv_schedule()
6449 adapter->taprio_offload_enable = true; in igc_save_qbv_schedule()
6451 for (n = 0; n < qopt->num_entries; n++) { in igc_save_qbv_schedule()
6452 struct tc_taprio_sched_entry *e = &qopt->entries[n]; in igc_save_qbv_schedule()
6454 end_time += e->interval; in igc_save_qbv_schedule()
6462 * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2, in igc_save_qbv_schedule()
6467 if (end_time > adapter->cycle_time || in igc_save_qbv_schedule()
6468 n + 1 == qopt->num_entries) in igc_save_qbv_schedule()
6469 end_time = adapter->cycle_time; in igc_save_qbv_schedule()
6471 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_save_qbv_schedule()
6472 struct igc_ring *ring = adapter->tx_ring[i]; in igc_save_qbv_schedule()
6474 if (!(e->gate_mask & BIT(i))) in igc_save_qbv_schedule()
6482 ring->start_time = start_time; in igc_save_qbv_schedule()
6483 ring->end_time = end_time; in igc_save_qbv_schedule()
6485 if (ring->start_time >= adapter->cycle_time) in igc_save_qbv_schedule()
6491 start_time += e->interval; in igc_save_qbv_schedule()
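/* Worked example (illustrative): with cycle_time = 1000000 ns and two
 * entries - 600000 ns gating queue 0 and 400000 ns gating queue 1 - the
 * loop above produces:
 *
 *	tx_ring[0]: start_time = 0,      end_time = 600000
 *	tx_ring[1]: start_time = 600000, end_time = 1000000
 *
 * The last entry's end_time is clamped to cycle_time, so the schedule
 * always covers the full cycle.
 */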
6494 spin_lock_irqsave(&adapter->qbv_tx_lock, flags); in igc_save_qbv_schedule()
6499 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_save_qbv_schedule()
6500 struct igc_ring *ring = adapter->tx_ring[i]; in igc_save_qbv_schedule()
6502 if (!is_base_time_past(qopt->base_time, &now)) { in igc_save_qbv_schedule()
6503 ring->admin_gate_closed = false; in igc_save_qbv_schedule()
6505 ring->oper_gate_closed = false; in igc_save_qbv_schedule()
6506 ring->admin_gate_closed = false; in igc_save_qbv_schedule()
6510 if (!is_base_time_past(qopt->base_time, &now)) in igc_save_qbv_schedule()
6511 ring->admin_gate_closed = true; in igc_save_qbv_schedule()
6513 ring->oper_gate_closed = true; in igc_save_qbv_schedule()
6515 ring->start_time = end_time; in igc_save_qbv_schedule()
6516 ring->end_time = end_time; in igc_save_qbv_schedule()
6520 spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); in igc_save_qbv_schedule()
6522 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_save_qbv_schedule()
6523 struct igc_ring *ring = adapter->tx_ring[i]; in igc_save_qbv_schedule()
6524 struct net_device *dev = adapter->netdev; in igc_save_qbv_schedule()
6526 if (qopt->max_sdu[i]) in igc_save_qbv_schedule()
6527 ring->max_sdu = qopt->max_sdu[i] + dev->hard_header_len - ETH_TLEN; in igc_save_qbv_schedule()
6529 ring->max_sdu = 0; in igc_save_qbv_schedule()
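/* Worked example (illustrative): for Ethernet hard_header_len is 14 and
 * ETH_TLEN (the EtherType field) is 2, so a taprio "max-sdu 1500"
 * request stores ring->max_sdu = 1500 + 14 - 2 = 1512.
 */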
6538 struct igc_hw *hw = &adapter->hw; in igc_tsn_enable_qbv_scheduling()
6541 if (hw->mac.type != igc_i225) in igc_tsn_enable_qbv_scheduling()
6542 return -EOPNOTSUPP; in igc_tsn_enable_qbv_scheduling()
6544 switch (qopt->cmd) { in igc_tsn_enable_qbv_scheduling()
6552 igc_taprio_stats(adapter->netdev, &qopt->stats); in igc_tsn_enable_qbv_scheduling()
6555 igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats); in igc_tsn_enable_qbv_scheduling()
6558 return -EOPNOTSUPP; in igc_tsn_enable_qbv_scheduling()
6572 struct net_device *netdev = adapter->netdev; in igc_save_cbs_params()
6576 /* i225 has two sets of credit-based shaper logic. in igc_save_cbs_params()
6580 return -EINVAL; in igc_save_cbs_params()
6582 ring = adapter->tx_ring[queue]; in igc_save_cbs_params()
6585 if (adapter->tx_ring[i]) in igc_save_cbs_params()
6586 cbs_status[i] = adapter->tx_ring[i]->cbs_enable; in igc_save_cbs_params()
6595 return -EINVAL; in igc_save_cbs_params()
6601 return -EINVAL; in igc_save_cbs_params()
6605 ring->cbs_enable = enable; in igc_save_cbs_params()
6606 ring->idleslope = idleslope; in igc_save_cbs_params()
6607 ring->sendslope = sendslope; in igc_save_cbs_params()
6608 ring->hicredit = hicredit; in igc_save_cbs_params()
6609 ring->locredit = locredit; in igc_save_cbs_params()
6617 struct igc_hw *hw = &adapter->hw; in igc_tsn_enable_cbs()
6620 if (hw->mac.type != igc_i225) in igc_tsn_enable_cbs()
6621 return -EOPNOTSUPP; in igc_tsn_enable_cbs()
6623 if (qopt->queue < 0 || qopt->queue > 1) in igc_tsn_enable_cbs()
6624 return -EINVAL; in igc_tsn_enable_cbs()
6626 err = igc_save_cbs_params(adapter, qopt->queue, qopt->enable, in igc_tsn_enable_cbs()
6627 qopt->idleslope, qopt->sendslope, in igc_tsn_enable_cbs()
6628 qopt->hicredit, qopt->locredit); in igc_tsn_enable_cbs()
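/* For clarity: the CBS parameters are passed through unchanged from the
 * tc "cbs" offload request - idleslope/sendslope in kilobits per second
 * and hicredit/locredit in bytes (assumed to match tc-cbs semantics).
 * Only queues 0 and 1 are accepted because i225 implements two sets of
 * credit-based shaper logic, as noted in igc_save_cbs_params().
 */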
6638 struct igc_hw *hw = &adapter->hw; in igc_tc_query_caps()
6640 switch (base->type) { in igc_tc_query_caps()
6642 struct tc_mqprio_caps *caps = base->caps; in igc_tc_query_caps()
6644 caps->validate_queue_counts = true; in igc_tc_query_caps()
6649 struct tc_taprio_caps *caps = base->caps; in igc_tc_query_caps()
6651 caps->broken_mqprio = true; in igc_tc_query_caps()
6653 if (hw->mac.type == igc_i225) { in igc_tc_query_caps()
6654 caps->supports_queue_max_sdu = true; in igc_tc_query_caps()
6655 caps->gate_mask_per_txq = true; in igc_tc_query_caps()
6661 return -EOPNOTSUPP; in igc_tc_query_caps()
6670 adapter->strict_priority_enable = true; in igc_save_mqprio_params()
6671 adapter->num_tc = num_tc; in igc_save_mqprio_params()
6674 adapter->queue_per_tc[i] = offset[i]; in igc_save_mqprio_params()
6680 struct igc_hw *hw = &adapter->hw; in igc_tsn_enable_mqprio()
6683 if (hw->mac.type != igc_i225) in igc_tsn_enable_mqprio()
6684 return -EOPNOTSUPP; in igc_tsn_enable_mqprio()
6686 if (!mqprio->qopt.num_tc) { in igc_tsn_enable_mqprio()
6687 adapter->strict_priority_enable = false; in igc_tsn_enable_mqprio()
6692 if (mqprio->qopt.num_tc != adapter->num_tx_queues) { in igc_tsn_enable_mqprio()
6693 NL_SET_ERR_MSG_FMT_MOD(mqprio->extack, in igc_tsn_enable_mqprio()
6695 adapter->num_tx_queues); in igc_tsn_enable_mqprio()
6696 return -EOPNOTSUPP; in igc_tsn_enable_mqprio()
6700 for (i = 0; i < mqprio->qopt.num_tc; i++) { in igc_tsn_enable_mqprio()
6701 if (mqprio->qopt.count[i] != 1) { in igc_tsn_enable_mqprio()
6702 NL_SET_ERR_MSG_MOD(mqprio->extack, in igc_tsn_enable_mqprio()
6704 return -EOPNOTSUPP; in igc_tsn_enable_mqprio()
6709 if (mqprio->preemptible_tcs) { in igc_tsn_enable_mqprio()
6710 NL_SET_ERR_MSG_MOD(mqprio->extack, in igc_tsn_enable_mqprio()
6712 return -EOPNOTSUPP; in igc_tsn_enable_mqprio()
6715 igc_save_mqprio_params(adapter, mqprio->qopt.num_tc, in igc_tsn_enable_mqprio()
6716 mqprio->qopt.offset); in igc_tsn_enable_mqprio()
6718 mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS; in igc_tsn_enable_mqprio()
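/* Illustrative usage (assumed, not from this file): a strict-priority
 * configuration on i225 maps exactly one Tx queue to each traffic
 * class, e.g. something along the lines of
 *
 *	tc qdisc replace dev eth0 root handle 100 mqprio \
 *		num_tc 4 map 0 1 2 3 queues 1@0 1@1 1@2 1@3 hw 1
 *
 * Requests with more than one queue per TC or with preemptible TCs are
 * rejected above.
 */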
6729 adapter->tc_setup_type = type; in igc_setup_tc()
6747 return -EOPNOTSUPP; in igc_setup_tc()
6755 switch (bpf->command) { in igc_bpf()
6757 return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack); in igc_bpf()
6759 return igc_xdp_setup_pool(adapter, bpf->xsk.pool, in igc_bpf()
6760 bpf->xsk.queue_id); in igc_bpf()
6762 return -EOPNOTSUPP; in igc_bpf()
6776 return -ENETDOWN; in igc_xdp_xmit()
6779 return -EINVAL; in igc_xdp_xmit()
6811 struct igc_hw *hw = &adapter->hw; in igc_trigger_rxtxq_interrupt()
6814 eics |= q_vector->eims_value; in igc_trigger_rxtxq_interrupt()
6824 if (test_bit(__IGC_DOWN, &adapter->state)) in igc_xsk_wakeup()
6825 return -ENETDOWN; in igc_xsk_wakeup()
6828 return -ENXIO; in igc_xsk_wakeup()
6830 if (queue_id >= adapter->num_rx_queues) in igc_xsk_wakeup()
6831 return -EINVAL; in igc_xsk_wakeup()
6833 ring = adapter->rx_ring[queue_id]; in igc_xsk_wakeup()
6835 if (!ring->xsk_pool) in igc_xsk_wakeup()
6836 return -ENXIO; in igc_xsk_wakeup()
6838 q_vector = adapter->q_vector[queue_id]; in igc_xsk_wakeup()
6839 if (!napi_if_scheduled_mark_missed(&q_vector->napi)) in igc_xsk_wakeup()
6853 tstamp = hwtstamps->netdev_data; in igc_get_tstamp()
6856 timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer1); in igc_get_tstamp()
6858 timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0); in igc_get_tstamp()
6886 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); in igc_rd32()
6896 struct net_device *netdev = igc->netdev; in igc_rd32()
6898 hw->hw_addr = NULL; in igc_rd32()
6901 WARN(pci_device_is_present(igc->pdev), in igc_rd32()
6921 [11] = XDP_RSS_TYPE_NONE, /* keep array sized for SW bit-mask */
6933 if (!(ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)) in igc_xdp_rx_hash()
6934 return -ENODATA; in igc_xdp_rx_hash()
6936 *hash = le32_to_cpu(ctx->rx_desc->wb.lower.hi_dword.rss); in igc_xdp_rx_hash()
6937 *rss_type = igc_xdp_rss_type[igc_rss_type(ctx->rx_desc)]; in igc_xdp_rx_hash()
6945 struct igc_adapter *adapter = netdev_priv(ctx->xdp.rxq->dev); in igc_xdp_rx_timestamp()
6946 struct igc_inline_rx_tstamps *tstamp = ctx->rx_ts; in igc_xdp_rx_timestamp()
6948 if (igc_test_staterr(ctx->rx_desc, IGC_RXDADV_STAT_TSIP)) { in igc_xdp_rx_timestamp()
6949 *timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0); in igc_xdp_rx_timestamp()
6954 return -ENODATA; in igc_xdp_rx_timestamp()
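/* For clarity: these two callbacks back the xdp_metadata_ops table, so
 * an XDP program attached to this device can call the
 * bpf_xdp_metadata_rx_hash() and bpf_xdp_metadata_rx_timestamp() kfuncs
 * to read the RSS hash and the inline Rx timestamp without parsing the
 * descriptors itself.
 */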
6969 spin_lock_irqsave(&adapter->qbv_tx_lock, flags); in igc_qbv_scheduling_timer()
6971 adapter->qbv_transition = true; in igc_qbv_scheduling_timer()
6972 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_qbv_scheduling_timer()
6973 struct igc_ring *tx_ring = adapter->tx_ring[i]; in igc_qbv_scheduling_timer()
6975 if (tx_ring->admin_gate_closed) { in igc_qbv_scheduling_timer()
6976 tx_ring->admin_gate_closed = false; in igc_qbv_scheduling_timer()
6977 tx_ring->oper_gate_closed = true; in igc_qbv_scheduling_timer()
6979 tx_ring->oper_gate_closed = false; in igc_qbv_scheduling_timer()
6982 adapter->qbv_transition = false; in igc_qbv_scheduling_timer()
6984 spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); in igc_qbv_scheduling_timer()
6990 * igc_probe - Device Initialization Routine
7006 const struct igc_info *ei = igc_info_tbl[ent->driver_data]; in igc_probe()
7013 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in igc_probe()
7015 dev_err(&pdev->dev, in igc_probe()
7026 dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n"); in igc_probe()
7030 err = -ENOMEM; in igc_probe()
7037 SET_NETDEV_DEV(netdev, &pdev->dev); in igc_probe()
7041 adapter->netdev = netdev; in igc_probe()
7042 adapter->pdev = pdev; in igc_probe()
7043 hw = &adapter->hw; in igc_probe()
7044 hw->back = adapter; in igc_probe()
7045 adapter->port_num = hw->bus.func; in igc_probe()
7046 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); in igc_probe()
7052 err = -EIO; in igc_probe()
7053 adapter->io_addr = ioremap(pci_resource_start(pdev, 0), in igc_probe()
7055 if (!adapter->io_addr) in igc_probe()
7058 /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */ in igc_probe()
7059 hw->hw_addr = adapter->io_addr; in igc_probe()
7061 netdev->netdev_ops = &igc_netdev_ops; in igc_probe()
7062 netdev->xdp_metadata_ops = &igc_xdp_metadata_ops; in igc_probe()
7063 netdev->xsk_tx_metadata_ops = &igc_xsk_tx_metadata_ops; in igc_probe()
7065 netdev->watchdog_timeo = 5 * HZ; in igc_probe()
7067 netdev->mem_start = pci_resource_start(pdev, 0); in igc_probe()
7068 netdev->mem_end = pci_resource_end(pdev, 0); in igc_probe()
7071 hw->vendor_id = pdev->vendor; in igc_probe()
7072 hw->device_id = pdev->device; in igc_probe()
7073 hw->revision_id = pdev->revision; in igc_probe()
7074 hw->subsystem_vendor_id = pdev->subsystem_vendor; in igc_probe()
7075 hw->subsystem_device_id = pdev->subsystem_device; in igc_probe()
7078 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); in igc_probe()
7079 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); in igc_probe()
7081 /* Initialize skew-specific constants */ in igc_probe()
7082 err = ei->get_invariants(hw); in igc_probe()
7087 netdev->features |= NETIF_F_SG; in igc_probe()
7088 netdev->features |= NETIF_F_TSO; in igc_probe()
7089 netdev->features |= NETIF_F_TSO6; in igc_probe()
7090 netdev->features |= NETIF_F_TSO_ECN; in igc_probe()
7091 netdev->features |= NETIF_F_RXHASH; in igc_probe()
7092 netdev->features |= NETIF_F_RXCSUM; in igc_probe()
7093 netdev->features |= NETIF_F_HW_CSUM; in igc_probe()
7094 netdev->features |= NETIF_F_SCTP_CRC; in igc_probe()
7095 netdev->features |= NETIF_F_HW_TC; in igc_probe()
7104 netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES; in igc_probe()
7105 netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES; in igc_probe()
7113 netdev->hw_features |= NETIF_F_NTUPLE; in igc_probe()
7114 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; in igc_probe()
7115 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; in igc_probe()
7116 netdev->hw_features |= netdev->features; in igc_probe()
7118 netdev->features |= NETIF_F_HIGHDMA; in igc_probe()
7120 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; in igc_probe()
7121 netdev->mpls_features |= NETIF_F_HW_CSUM; in igc_probe()
7122 netdev->hw_enc_features |= netdev->vlan_features; in igc_probe()
7124 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | in igc_probe()
7127 /* MTU range: 68 - 9216 */ in igc_probe()
7128 netdev->min_mtu = ETH_MIN_MTU; in igc_probe()
7129 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; in igc_probe()
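/*
 * ETH_MIN_MTU is 68 bytes and MAX_STD_JUMBO_FRAME_SIZE is the 9216-byte upper
 * bound named in the "MTU range" comment above; dev_set_mtu() requests outside
 * this window are rejected by the networking core before reaching the driver.
 */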
7134 hw->mac.ops.reset_hw(hw); in igc_probe()
7137 if (hw->nvm.ops.validate(hw) < 0) { in igc_probe()
7138 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); in igc_probe()
7139 err = -EIO; in igc_probe()
7144 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { in igc_probe()
7146 if (hw->mac.ops.read_mac_addr(hw)) in igc_probe()
7147 dev_err(&pdev->dev, "NVM Read Error\n"); in igc_probe()
7150 eth_hw_addr_set(netdev, hw->mac.addr); in igc_probe()
7152 if (!is_valid_ether_addr(netdev->dev_addr)) { in igc_probe()
7153 dev_err(&pdev->dev, "Invalid MAC Address\n"); in igc_probe()
7154 err = -EIO; in igc_probe()
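/*
 * The MAC address is taken from platform/firmware data first via
 * eth_platform_get_mac_address(); only if that fails is it read from the NVM,
 * and an address that is still invalid aborts the probe with -EIO.
 */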
7162 timer_setup(&adapter->watchdog_timer, igc_watchdog, 0); in igc_probe()
7163 timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0); in igc_probe()
7165 INIT_WORK(&adapter->reset_task, igc_reset_task); in igc_probe()
7166 INIT_WORK(&adapter->watchdog_task, igc_watchdog_task); in igc_probe()
7168 hrtimer_init(&adapter->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in igc_probe()
7169 adapter->hrtimer.function = &igc_qbv_scheduling_timer; in igc_probe()
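/*
 * The hrtimer is only initialized at probe time; it is armed later, from the
 * Qbv/taprio configuration path (outside this excerpt), so that
 * igc_qbv_scheduling_timer() above runs once the configured base time passes.
 */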
7171 /* Initialize link properties that are user-changeable */ in igc_probe()
7172 adapter->fc_autoneg = true; in igc_probe()
7173 hw->phy.autoneg_advertised = 0xaf; in igc_probe()
7175 hw->fc.requested_mode = igc_fc_default; in igc_probe()
7176 hw->fc.current_mode = igc_fc_default; in igc_probe()
7179 adapter->flags |= IGC_FLAG_WOL_SUPPORTED; in igc_probe()
7182 if (adapter->flags & IGC_FLAG_WOL_SUPPORTED) in igc_probe()
7183 adapter->wol |= IGC_WUFC_MAG; in igc_probe()
7185 device_set_wakeup_enable(&adapter->pdev->dev, in igc_probe()
7186 adapter->flags & IGC_FLAG_WOL_SUPPORTED); in igc_probe()
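/*
 * With IGC_WUFC_MAG set by default, magic-packet wake is the out-of-the-box
 * WoL mode. Once the interface is registered it can normally be adjusted from
 * userspace with ethtool, e.g. (interface name is illustrative):
 *
 *	ethtool -s eth0 wol g	// enable magic-packet Wake-on-LAN
 *	ethtool eth0		// "Wake-on:" reports the current setting
 */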
7200 strscpy(netdev->name, "eth%d", sizeof(netdev->name)); in igc_probe()
7209 adapter->ei = *ei; in igc_probe()
7213 netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr); in igc_probe()
7215 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); in igc_probe()
7217 hw->dev_spec._base.eee_enable = false; in igc_probe()
7218 adapter->flags &= ~IGC_FLAG_EEE; in igc_probe()
7221 pm_runtime_put_noidle(&pdev->dev); in igc_probe()
7239 iounmap(adapter->io_addr); in igc_probe()
7251 * igc_remove - Device Removal Routine
7256 * Hot-Plug event, or because the driver is going to be removed from
7264 pm_runtime_get_noresume(&pdev->dev); in igc_remove()
7273 set_bit(__IGC_DOWN, &adapter->state); in igc_remove()
7275 del_timer_sync(&adapter->watchdog_timer); in igc_remove()
7276 del_timer_sync(&adapter->phy_info_timer); in igc_remove()
7278 cancel_work_sync(&adapter->reset_task); in igc_remove()
7279 cancel_work_sync(&adapter->watchdog_task); in igc_remove()
7280 hrtimer_cancel(&adapter->hrtimer); in igc_remove()
7292 pci_iounmap(pdev, adapter->io_addr); in igc_remove()
7305 u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol; in __igc_shutdown()
7306 struct igc_hw *hw = &adapter->hw; in __igc_shutdown()
7329 /* turn on all-multi mode if wake on multicast is enabled */ in __igc_shutdown()
7350 wake = wufc || adapter->en_mng_pt; in __igc_shutdown()
7352 igc_power_down_phy_copper_base(&adapter->hw); in __igc_shutdown()
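/*
 * The wake filter mask chosen above distinguishes the two suspend flavours:
 * runtime suspend arms only link-change wake (IGC_WUFC_LNKC), while a
 * system-wide suspend or shutdown uses the user-configured adapter->wol mask;
 * "wake" also stays set while manageability (adapter->en_mng_pt) needs the
 * port to remain powered.
 */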
7377 struct igc_hw *hw = &adapter->hw; in igc_deliver_wake_packet()
7395 /* Ensure reads are 32-bit aligned */ in igc_deliver_wake_packet()
7398 memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl); in igc_deliver_wake_packet()
7400 skb->protocol = eth_type_trans(skb, netdev); in igc_deliver_wake_packet()
7404 static int __igc_resume(struct device *dev, bool rpm) in __igc_resume() argument
7409 struct igc_hw *hw = &adapter->hw; in __igc_resume()
7417 return -ENODEV; in __igc_resume()
7430 return -ENOMEM; in __igc_resume()
7447 if (!rpm) in __igc_resume()
7450 if (!rpm) in __igc_resume()
7482 return -EBUSY; in igc_runtime_idle()
7498 * igc_io_error_detected - called when PCI error is detected
7529 * igc_io_slot_reset - called after the PCI bus has been reset.
7532 * Restart the card from scratch, as if from a cold-boot. Implementation
7533 * resembles the first-half of the __igc_resume routine.
7539 struct igc_hw *hw = &adapter->hw; in igc_io_slot_reset()
7543 netdev_err(netdev, "Could not re-enable PCI device after reset\n"); in igc_io_slot_reset()
7554 * so we should re-assign it here. in igc_io_slot_reset()
7556 hw->hw_addr = adapter->io_addr; in igc_io_slot_reset()
7567 * igc_io_resume - called when traffic can start to flow again.
7572 * second-half of the __igc_resume routine.
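/*
 * igc_io_error_detected(), igc_io_slot_reset() and igc_io_resume() implement
 * the standard PCI error-recovery sequence (error_detected -> slot_reset ->
 * resume) described in Documentation/PCI/pci-error-recovery.rst. They are
 * normally wired up through a struct pci_error_handlers table along these
 * lines (a sketch; the exact table is outside this excerpt):
 *
 *	static const struct pci_error_handlers igc_err_handler = {
 *		.error_detected	= igc_io_error_detected,
 *		.slot_reset	= igc_io_slot_reset,
 *		.resume		= igc_io_resume,
 *	};
 */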
7618 * igc_reinit_queues - re-initialize the interrupt scheme and queues
7623 struct net_device *netdev = adapter->netdev; in igc_reinit_queues()
7633 return -ENOMEM; in igc_reinit_queues()
7643 * igc_get_hw_dev - return device
7650 struct igc_adapter *adapter = hw->back; in igc_get_hw_dev()
7652 return adapter->netdev; in igc_get_hw_dev()
7657 struct igc_hw *hw = &ring->q_vector->adapter->hw; in igc_disable_rx_ring_hw()
7658 u8 idx = ring->reg_idx; in igc_disable_rx_ring_hw()
7675 struct igc_adapter *adapter = ring->q_vector->adapter; in igc_enable_rx_ring()
7679 if (ring->xsk_pool) in igc_enable_rx_ring()
7693 struct igc_adapter *adapter = ring->q_vector->adapter; in igc_enable_tx_ring()
7699 * igc_init_module - Driver Registration Routine
7718 * igc_exit_module - Driver Exit Cleanup Routine