Excerpts from rt2x00queue.c, the data queue handling code of the rt2x00 wireless driver family, grouped by function. Elided lines are marked /* ... */.

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2004 - 2010 Ivo van Doorn <[email protected]>
 * Copyright (C) 2004 - 2009 Gertjan van Wingerde <[email protected]>
 */

#include <linux/dma-mapping.h>

/* rt2x00queue_alloc_rxskb() */
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;

	/* ... */
	frame_size = queue->data_size + queue->desc_size + queue->winfo_size;

	/*
	 * The payload should be aligned to a 4-byte boundary, ...
	 */
	/* ... */
	skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	skbdesc->skb_dma = skb_dma;
	skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;

/* rt2x00queue_map_txskb() */
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	skbdesc->skb_dma =
	    dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(dev, skbdesc->skb_dma)))
		return -ENOMEM;

	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
	return 0;

/* rt2x00queue_unmap_skb() */
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	} else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}

/* rt2x00queue_free_skb() */
	if (!entry->skb)
		return;

	rt2x00queue_unmap_skb(entry);
	dev_kfree_skb_any(entry->skb);
	entry->skb = NULL;

/* rt2x00queue_align_frame() */
	unsigned int frame_length = skb->len;
	/* ... */
	memmove(skb->data, skb->data + align, frame_length);

/* rt2x00queue_insert_l2pad() */
	unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;
	/* ... */
	memmove(skb->data, skb->data + l2pad, hdr_len);

/* rt2x00queue_remove_l2pad() */
	unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;
	/* ... */
	memmove(skb->data + l2pad, skb->data, hdr_len);

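The pad size above is plain alignment arithmetic: L2PAD_SIZE (defined in rt2x00.h) is the number of bytes needed to round the 802.11 header length up to the next 4-byte boundary. A minimal sketch, assuming the macro has this shape:

/* Sketch: pad needed so the payload behind an hdr_len-byte header
 * starts 4-byte aligned; (-hdrlen) & 3 equals (4 - hdrlen % 4) % 4.
 */
#define L2PAD_SIZE(__hdrlen)	(-(__hdrlen) & 3)

/* Example: a 26-byte QoS data header needs L2PAD_SIZE(26) == 2 bytes
 * of pad; a 24-byte header needs none.
 */
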
/* rt2x00queue_create_tx_descriptor_seq() */
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	u16 seqno;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		return;

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
	/* ... the hardware incorrectly increases the seqno on retransmitted
	 * data (non-QoS) and management frames, so generate it in software
	 * instead; beacons are not affected. */
	if (ieee80211_is_beacon(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
		return;
	}
	__clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);

	/* ... drivers that cannot toggle sequence counting per-frame will
	 * override the sequence counter given by mac80211. */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		seqno = atomic_add_return(0x10, &intf->seqno);
	else
		seqno = atomic_read(&intf->seqno);

	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(seqno);

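The 0x10 step and the two mask operations follow from the seq_ctrl layout in linux/ieee80211.h: bits 0-3 carry the fragment number (IEEE80211_SCTL_FRAG == 0x000F) and bits 4-15 the sequence number (IEEE80211_SCTL_SEQ == 0xFFF0), so adding 0x10 advances the sequence number by exactly one. A standalone illustration:

#include <stdint.h>

#define IEEE80211_SCTL_FRAG	0x000F	/* fragment number, bits 0-3 */
#define IEEE80211_SCTL_SEQ	0xFFF0	/* sequence number, bits 4-15 */

/* Keep the fragment number, install a new sequence number. */
static uint16_t apply_seqno(uint16_t seq_ctrl, uint16_t seqno)
{
	seq_ctrl &= IEEE80211_SCTL_FRAG;
	seq_ctrl |= seqno & IEEE80211_SCTL_SEQ;
	return seq_ctrl;
}
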
/* rt2x00queue_create_tx_descriptor_plcp() */
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];

	/* ... or this fragment came after RTS/CTS. */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		txdesc->u.plcp.ifs = IFS_BACKOFF;
	else
		txdesc->u.plcp.ifs = IFS_SIFS;

	/* Frame length plus 4 bytes of FCS. */
	data_length = skb->len + 4;
	/* ... */
	txdesc->u.plcp.signal = hwrate->plcp;
	txdesc->u.plcp.service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
		txdesc->u.plcp.length_low = data_length & 0x3f;
	} else {
		/* ... */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;
			/* Check if we need to set the length extension. */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->u.plcp.service |= 0x80;
		}

		txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
		txdesc->u.plcp.length_low = duration & 0xff;
	}

	if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
		txdesc->u.plcp.signal |= 0x08;

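For the CCK branch, GET_DURATION/GET_DURATION_RES (from rt2x00queue.h) turn a frame size in bytes and a bit rate in units of 100 kbit/s into whole microseconds of airtime plus a remainder; at 11 Mbps (bitrate == 110) a small non-zero remainder additionally sets the PLCP length-extension bit. A self-contained sketch, assuming those macro shapes:

#include <stdio.h>

/* Assumed shapes of the rt2x00queue.h helpers: size in bytes, rate in
 * units of 100 kbit/s, result in microseconds (and the remainder).
 */
#define GET_DURATION(__size, __rate)	 (((__size) * 8 * 10) / (__rate))
#define GET_DURATION_RES(__size, __rate) (((__size) * 8 * 10) % (__rate))

int main(void)
{
	unsigned int data_length = 1500 + 4;	/* payload + FCS */
	unsigned int bitrate = 110;		/* 11 Mbps CCK */
	unsigned int residual = GET_DURATION_RES(data_length, bitrate);
	unsigned int duration = GET_DURATION(data_length, bitrate);
	int length_ext = 0;

	if (residual != 0) {
		duration++;	/* round airtime up to a whole microsecond */
		if (bitrate == 110 && residual <= 30)
			length_ext = 1;	/* service |= 0x80 in the driver */
	}

	/* 1504 bytes at 11 Mbps: 1094 us, no length extension. */
	printf("LENGTH=%u us, extension=%d\n", duration, length_ext);
	return 0;
}
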
/* rt2x00queue_create_tx_descriptor_ht() */
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (sta) {
		/* ... */
		txdesc->u.ht.wcid = sta_priv->wcid;
		density = sta->deflink.ht_cap.ampdu_density;
	}

	/*
	 * If IEEE80211_TX_RC_MCS is set, txrate->idx just contains the
	 * MCS index to use.
	 */
	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		txdesc->u.ht.mcs = txrate->idx;

		/* ... */
		if (sta && txdesc->u.ht.mcs > 7 &&
		    sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC)
			__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
	} else {
		txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.ht.mcs |= 0x08;
	}

	if (test_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags)) {
		if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
			txdesc->u.ht.txop = TXOP_SIFS;
		else
			txdesc->u.ht.txop = TXOP_BACKOFF;
		return;
	}

	/* ... */
	if (tx_info->flags & IEEE80211_TX_CTL_STBC)
		txdesc->u.ht.stbc = 1;

	/* Enable A-MPDU aggregation unless rate control is probing. */
	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
		__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
		txdesc->u.ht.mpdu_density = density;
		txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */
	}

	/* Channel width and guard interval. */
	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
	    txrate->flags & IEEE80211_TX_RC_DUP_DATA)
		__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);

	/*
	 * Determine TXOP values:
	 * - Use TXOP_BACKOFF for management frames except beacons
	 * - Use TXOP_SIFS for fragment bursts
	 * - Use TXOP_HTTXOP for everything else
	 *
	 * Note: rt2800 devices won't use CTS protection (if used)
	 * for frames not transmitted with TXOP_HTTXOP.
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_is_beacon(hdr->frame_control))
		txdesc->u.ht.txop = TXOP_BACKOFF;
	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
		txdesc->u.ht.txop = TXOP_SIFS;
	else
		txdesc->u.ht.txop = TXOP_HTTXOP;

/* rt2x00queue_create_tx_descriptor() */
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];

	/* ... */
	txdesc->length = skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb);

	/* Check whether this frame needs to be acked. */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/* Check if this is an RTS/CTS frame. */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
	}

	/* Determine retry information. */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/* Check if more fragments are pending. */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/* Check if more frames (!= fragments) are pending. */
	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */
	if ((ieee80211_is_beacon(hdr->frame_control) ||
	     ieee80211_is_probe_resp(hdr->frame_control)) &&
	    !(tx_info->flags & IEEE80211_TX_CTL_INJECTED))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);

	/* Determine rate modulation. */
	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
	else if (txrate->flags & IEEE80211_TX_RC_MCS)
		txdesc->rate_mode = RATE_MODE_HT_MIX;
	else {
		rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
		hwrate = rt2x00_get_rate(rate->hw_value);
		if (hwrate->flags & DEV_RATE_OFDM)
			txdesc->rate_mode = RATE_MODE_OFDM;
		else
			txdesc->rate_mode = RATE_MODE_CCK;
	}

/* rt2x00queue_write_tx_data() */
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/* ... */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		rt2x00_err(rt2x00dev,
			   "Corrupt queue %d, accessing entry which is not ours\n"
			   "Please file bug report to %s\n",
			   entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/* Add the requested extra TX headroom in front of the skb. */
	skb_push(entry->skb, rt2x00dev->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->extra_tx_headroom);

	/* Call the driver's write_tx_data function, if it exists. */
	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	/* Map the skb to DMA. */
	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA) &&
	    rt2x00queue_map_txskb(entry))
		return -ENOMEM;

/* rt2x00queue_write_tx_descriptor() */
	struct data_queue *queue = entry->queue;

	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

	/* All processing is done; dump the frame to debugfs if requested. */
	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry);

/* rt2x00queue_kick_tx_queue() */
	/*
	 * Kick the queue when it has reached its threshold, or when the
	 * frame is not part of a burst. ... This is true for fragments,
	 * RTS or CTS-to-self frames.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		queue->rt2x00dev->ops->lib->kick_queue(queue);

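rt2x00queue_threshold() is an inline helper in rt2x00queue.h; a sketch of the check it is assumed to perform, based on the length/limit bookkeeping shown in rt2x00queue_index_inc() further down:

/* Sketch of the rt2x00queue.h helpers this call relies on. */
static inline unsigned int rt2x00queue_available(struct data_queue *queue)
{
	return queue->limit - queue->length;	/* free entries left */
}

static inline int rt2x00queue_threshold(struct data_queue *queue)
{
	/* Kick early once free space drops below the ~10% threshold
	 * configured in rt2x00queue_init().
	 */
	return rt2x00queue_available(queue) < queue->threshold;
}
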
/* rt2x00queue_bar_check() */
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_bar *bar = (void *)(entry->skb->data +
					     rt2x00dev->extra_tx_headroom);
	struct rt2x00_bar_list_entry *bar_entry;

	if (likely(!ieee80211_is_back_req(bar->frame_control)))
		return;

	bar_entry = kmalloc(sizeof(*bar_entry), GFP_ATOMIC);
	if (!bar_entry)
		return;

	bar_entry->entry = entry;
	bar_entry->block_acked = 0;

	/*
	 * Copy the relevant parts of the 802.11 BAR into our check list,
	 * ... such that we can use RCU for less overhead in the RX path,
	 * since sending BARs should be the exception.
	 */
	memcpy(bar_entry->ra, bar->ra, sizeof(bar->ra));
	memcpy(bar_entry->ta, bar->ta, sizeof(bar->ta));
	bar_entry->control = bar->control;
	bar_entry->start_seq_num = bar->start_seq_num;

	/* Insert the BAR into our BAR check list. */
	spin_lock_bh(&rt2x00dev->bar_list_lock);
	list_add_tail_rcu(&bar_entry->list, &rt2x00dev->bar_list);
	spin_unlock_bh(&rt2x00dev->bar_list_lock);

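The RX side walks this list under rcu_read_lock() to match incoming BlockAcks against outstanding BARs; the actual matching logic lives in rt2x00dev.c and compares more fields than this hedged sketch, in which ba is assumed to point at the received BlockAck header:

	struct rt2x00_bar_list_entry *bar_entry;

	rcu_read_lock();
	list_for_each_entry_rcu(bar_entry, &rt2x00dev->bar_list, list) {
		/* Addresses swap direction: the BA's receiver address is
		 * the BAR's transmitter address and vice versa.
		 */
		if (!ether_addr_equal(ba->ra, bar_entry->ta) ||
		    !ether_addr_equal(ba->ta, bar_entry->ra))
			continue;
		bar_entry->block_acked = 1;
	}
	rcu_read_unlock();
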
/* rt2x00queue_write_tx_frame() */
	/*
	 * Copy all TX descriptor information into txdesc; after that we
	 * are free to use the skb->cb array for our information.
	 */
	rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta);

	/*
	 * All information is retrieved from the skb->cb array; claim the
	 * driver part of that array, preserving the rate index and flags.
	 */
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	/* ... */
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;
	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/* ... */
	if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_COPY_IV))
		rt2x00crypto_tx_copy_iv(skb, &txdesc);

	/*
	 * ... guarantee to the driver that the DMA is aligned to a
	 * 4-byte boundary.
	 */
	if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_L2PAD))
		rt2x00queue_insert_l2pad(skb, txdesc.header_length);
	else if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_DMA))
		rt2x00queue_align_frame(skb);

	spin_lock(&queue->tx_lock);
	if (unlikely(rt2x00queue_full(queue))) {
		rt2x00_dbg(queue->rt2x00dev, "Dropping frame due to full tx queue %d\n",
			   queue->qid);
		ret = -ENOBUFS;
		goto out;
	}

	if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
				      &entry->flags))) {
		rt2x00_err(queue->rt2x00dev,
			   "Arrived at non-free entry in the non-full queue %d\n"
			   "Please file bug report to %s\n",
			   queue->qid, DRV_PROJECT);
		ret = -EINVAL;
		goto out;
	}

	entry->skb = skb;
	/* ... */
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		ret = -EIO;
		goto out;
	}
	/* ... */
	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	/*
	 * Pausing the queue has to be serialized with rt2x00lib_txdone(), so
	 * do this under queue->tx_lock. Bottom half was already disabled
	 * before the ieee80211 transmit call.
	 */
	/* ... */
out:
	spin_unlock(&queue->tx_lock);
	return ret;

/* rt2x00queue_clear_beacon() */
	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/* Clean up the beacon skb. */
	rt2x00queue_free_skb(intf->beacon);

	/* Only hardware that supports clearing the beacon needs this. */
	if (rt2x00dev->ops->lib->clear_beacon)
		rt2x00dev->ops->lib->clear_beacon(intf->beacon);

/* rt2x00queue_update_beacon() */
	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/* Clean up the old beacon skb before fetching a new one. */
	rt2x00queue_free_skb(intf->beacon);

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif, 0);
	if (!intf->beacon->skb)
		return -ENOMEM;

	/*
	 * Copy all TX descriptor information into txdesc; after that we
	 * are free to use the skb->cb array for our information.
	 */
	rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL);

	/* Fill in the skb descriptor. */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	/* ... */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

/* rt2x00queue_for_each_entry() */
	if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev,
			   "Entry requested from invalid index range (%d - %d)\n",
			   start, end);
		return true;
	}

	/* Snapshot the index range under the lock. */
	spin_lock_irqsave(&queue->index_lock, irqflags);
	index_start = queue->index[start];
	index_end = queue->index[end];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	/* The range may wrap around the end of the ring. */
	if (index_start < index_end) {
		for (i = index_start; i < index_end; i++)
			if (fn(&queue->entries[i], data))
				return true;
	} else {
		for (i = index_start; i < queue->limit; i++)
			if (fn(&queue->entries[i], data))
				return true;
		for (i = 0; i < index_end; i++)
			if (fn(&queue->entries[i], data))
				return true;
	}

	return false;

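Callers pass a predicate that is invoked on each entry in the index range; iteration stops at the first entry for which it returns true. A hypothetical usage sketch (the callback and its check are illustrative, not from this file), assuming the argument order suggested by these fragments (queue, start index, end index, callback data, callback):

/* Hypothetical predicate: stop at the first entry the device still owns. */
static bool rt2x00queue_entry_pending(struct queue_entry *entry, void *data)
{
	return test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
}

	/* Walk from the last completed entry up to the newest queued one. */
	rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
				   NULL, rt2x00queue_entry_pending);
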
/* rt2x00queue_get_entry() */
	if (unlikely(index >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev, "Entry requested from invalid index type (%d)\n",
			   index);
		return NULL;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);
	entry = &queue->entries[queue->index[index]];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);

/* rt2x00queue_index_inc() */
	struct data_queue *queue = entry->queue;

	if (unlikely(index >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev,
			   "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	entry->last_action = jiffies;

	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);

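The driver keeps several cursors into one ring: Q_INDEX marks where new frames are queued and Q_INDEX_DONE where completed frames are reaped; length counts in-flight entries and count accumulates completions. A minimal standalone model of the wrap-around bookkeeping:

#include <assert.h>

#define QUEUE_LIMIT 4	/* stand-in for queue->limit */

struct ring {
	unsigned int q_index;		/* where new frames are queued */
	unsigned int q_index_done;	/* where completions are reaped */
	unsigned int length;		/* entries currently in flight */
};

static void index_inc(unsigned int *index)
{
	if (++(*index) >= QUEUE_LIMIT)
		*index = 0;	/* wrap, like queue->index[index] above */
}

int main(void)
{
	struct ring r = { 0, 0, 0 };

	index_inc(&r.q_index);      r.length++;	/* queue a frame */
	index_inc(&r.q_index_done); r.length--;	/* frame completed */
	assert(r.q_index == r.q_index_done && r.length == 0);
	return 0;
}
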
/* rt2x00queue_pause_queue_nocheck() */
	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/* For TX queues, disable the queue inside mac80211. */
		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	default:
		break;
	}

/* rt2x00queue_pause_queue() */
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
		return;

	rt2x00queue_pause_queue_nocheck(queue);

/* rt2x00queue_unpause_queue() */
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/* For TX queues, re-enable the queue inside mac80211. */
		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	case QID_RX:
		/* For RX, kick the queue so it receives frames again. */
		queue->rt2x00dev->ops->lib->kick_queue(queue);
		break;
	default:
		break;
	}

/* rt2x00queue_start_queue() */
	mutex_lock(&queue->status_lock);

	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	set_bit(QUEUE_PAUSED, &queue->flags);

	queue->rt2x00dev->ops->lib->start_queue(queue);

	rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);

/* rt2x00queue_stop_queue() */
	mutex_lock(&queue->status_lock);

	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	rt2x00queue_pause_queue_nocheck(queue);

	queue->rt2x00dev->ops->lib->stop_queue(queue);

	mutex_unlock(&queue->status_lock);

/* rt2x00queue_flush_queue() */
	bool tx_queue =
		(queue->qid == QID_AC_VO) ||
		(queue->qid == QID_AC_VI) ||
		(queue->qid == QID_AC_BE) ||
		(queue->qid == QID_AC_BK);

	/* ... */
	if (!drop && tx_queue)
		queue->rt2x00dev->ops->lib->kick_queue(queue);

	/*
	 * If the driver supports flushing, defer to it; otherwise wait
	 * for the queue to drain on its own.
	 */
	if (likely(queue->rt2x00dev->ops->lib->flush_queue))
		queue->rt2x00dev->ops->lib->flush_queue(queue, drop);

	if (unlikely(!rt2x00queue_empty(queue)))
		rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
			    queue->qid);

/* rt2x00queue_start_queues() */
	/* ... */
	rt2x00queue_start_queue(rt2x00dev->rx);

/* rt2x00queue_stop_queues() */
	ieee80211_stop_queues(rt2x00dev->hw);
	/* ... */
	rt2x00queue_stop_queue(rt2x00dev->rx);

/* rt2x00queue_flush_queues() */
	/* ... */
	rt2x00queue_flush_queue(rt2x00dev->rx, drop);

/* rt2x00queue_reset() */
	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->count = 0;
	queue->length = 0;

	for (i = 0; i < Q_INDEX_MAX; i++)
		queue->index[i] = 0;

	spin_unlock_irqrestore(&queue->index_lock, irqflags);

/* rt2x00queue_init_queues() */
		for (i = 0; i < queue->limit; i++)
			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);

/* rt2x00queue_alloc_entries() */
	entry_size = sizeof(*entries) + queue->priv_size;
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	/* ... */
	for (i = 0; i < queue->limit; i++) {
		/* ... */
		entries[i].priv_data =
			QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
						sizeof(*entries), queue->priv_size);
	}

	queue->entries = entries;

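The kcalloc() above sizes each element as sizeof(*entries) + queue->priv_size, and QUEUE_ENTRY_PRIV_OFFSET resolves entry i's private block on the assumption that all queue_entry structs come first and all driver-private blocks follow in the same region; a sketch of a macro consistent with that layout:

/* One region of queue->limit * (esize + psize) bytes, laid out as
 *
 *   [ entry 0 | ... | entry limit-1 | priv 0 | ... | priv limit-1 ]
 *
 * so entry __index's private block sits behind all the entries.
 */
#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	(((char *)(__base)) + ((__limit) * (__esize)) + \
	 ((__index) * (__psize)))
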
/* rt2x00queue_free_skbs() */
	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++)
		rt2x00queue_free_skb(&queue->entries[i]);

/* rt2x00queue_alloc_rxskbs() */
	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

/* rt2x00queue_initialize() */
	status = rt2x00queue_alloc_entries(rt2x00dev->rx);
	/* ... */
	status = rt2x00queue_alloc_entries(rt2x00dev->bcn);
	/* ... */
	status = rt2x00queue_alloc_entries(rt2x00dev->atim);
	/* ... */
	status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);

/* rt2x00queue_uninitialize() */
	rt2x00queue_free_skbs(rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}

/* rt2x00queue_init() */
	mutex_init(&queue->status_lock);
	spin_lock_init(&queue->tx_lock);
	spin_lock_init(&queue->index_lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;

	rt2x00dev->ops->queue_init(queue);

	queue->threshold = DIV_ROUND_UP(queue->limit, 10);

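DIV_ROUND_UP is the stock kernel macro, so the kick threshold works out to a tenth of the queue depth, rounded up:

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Examples: limit 64 -> threshold 7; limit 24 -> threshold 3. */
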
/* rt2x00queue_allocate() */
	/*
	 * We need the following queues:
	 * RX: 1, TX: ops->tx_queues, Beacon: 1, Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	/* Initialize pointers. */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
	rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;

	/* Initialize queue parameters. */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);
	/* ... */
	rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);

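Those pointer assignments carve the single kcalloc'd array up positionally; for a hypothetical device with ops->tx_queues == 4 and an ATIM queue (req_atim == 1, hence data_queues == 7) the layout is:

/* queue[0]             -> rt2x00dev->rx
 * queue[1] .. queue[4] -> rt2x00dev->tx   (ops->tx_queues entries)
 * queue[5]             -> rt2x00dev->bcn  (index 1 + tx_queues)
 * queue[6]             -> rt2x00dev->atim (index 2 + tx_queues)
 */
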
/* rt2x00queue_free() */
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;