1 // SPDX-License-Identifier: ISC
2 /*
3 * Copyright (C) 2016 Felix Fietkau <[email protected]>
4 */
5 #include <linux/sched.h>
6 #include <linux/of.h>
7 #include "mt76.h"
8
/* Channel template initializers for the static tables below.
 * The tables are copied per-phy by mt76_init_sband(), so .max_power
 * here is only a default (30 dBm) that drivers may later adjust.
 */
#define CHAN2G(_idx, _freq) { \
	.band = NL80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}

#define CHAN5G(_idx, _freq) { \
	.band = NL80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}

#define CHAN6G(_idx, _freq) { \
	.band = NL80211_BAND_6GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}
29
/* 2.4 GHz band: channels 1-14 (2412-2484 MHz) */
static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};
46
/* 5 GHz band: channels 36-177, grouped by UNII sub-band */
static const struct ieee80211_channel mt76_channels_5ghz[] = {
	/* UNII-1 */
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(48, 5240),

	/* UNII-2A (DFS) */
	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),

	/* UNII-2C (DFS) */
	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),
	CHAN5G(144, 5720),

	/* UNII-3/4 */
	CHAN5G(149, 5745),
	CHAN5G(153, 5765),
	CHAN5G(157, 5785),
	CHAN5G(161, 5805),
	CHAN5G(165, 5825),
	CHAN5G(169, 5845),
	CHAN5G(173, 5865),
	CHAN5G(177, 5885),
};
80
/* 6 GHz band: channels 1-233 (5955-7115 MHz), 20 MHz spacing */
static const struct ieee80211_channel mt76_channels_6ghz[] = {
	/* UNII-5 */
	CHAN6G(1, 5955),
	CHAN6G(5, 5975),
	CHAN6G(9, 5995),
	CHAN6G(13, 6015),
	CHAN6G(17, 6035),
	CHAN6G(21, 6055),
	CHAN6G(25, 6075),
	CHAN6G(29, 6095),
	CHAN6G(33, 6115),
	CHAN6G(37, 6135),
	CHAN6G(41, 6155),
	CHAN6G(45, 6175),
	CHAN6G(49, 6195),
	CHAN6G(53, 6215),
	CHAN6G(57, 6235),
	CHAN6G(61, 6255),
	CHAN6G(65, 6275),
	CHAN6G(69, 6295),
	CHAN6G(73, 6315),
	CHAN6G(77, 6335),
	CHAN6G(81, 6355),
	CHAN6G(85, 6375),
	CHAN6G(89, 6395),
	CHAN6G(93, 6415),
	/* UNII-6 */
	CHAN6G(97, 6435),
	CHAN6G(101, 6455),
	CHAN6G(105, 6475),
	CHAN6G(109, 6495),
	CHAN6G(113, 6515),
	CHAN6G(117, 6535),
	/* UNII-7 */
	CHAN6G(121, 6555),
	CHAN6G(125, 6575),
	CHAN6G(129, 6595),
	CHAN6G(133, 6615),
	CHAN6G(137, 6635),
	CHAN6G(141, 6655),
	CHAN6G(145, 6675),
	CHAN6G(149, 6695),
	CHAN6G(153, 6715),
	CHAN6G(157, 6735),
	CHAN6G(161, 6755),
	CHAN6G(165, 6775),
	CHAN6G(169, 6795),
	CHAN6G(173, 6815),
	CHAN6G(177, 6835),
	CHAN6G(181, 6855),
	CHAN6G(185, 6875),
	/* UNII-8 */
	CHAN6G(189, 6895),
	CHAN6G(193, 6915),
	CHAN6G(197, 6935),
	CHAN6G(201, 6955),
	CHAN6G(205, 6975),
	CHAN6G(209, 6995),
	CHAN6G(213, 7015),
	CHAN6G(217, 7035),
	CHAN6G(221, 7055),
	CHAN6G(225, 7075),
	CHAN6G(229, 7095),
	CHAN6G(233, 7115),
};
146
/* LED blink intervals keyed by throughput (kbps); higher throughput
 * blinks faster. Consumed by ieee80211_create_tpt_led_trigger().
 */
static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput = 0 * 1024, .blink_time = 334 },
	{ .throughput = 1 * 1024, .blink_time = 260 },
	{ .throughput = 5 * 1024, .blink_time = 220 },
	{ .throughput = 10 * 1024, .blink_time = 190 },
	{ .throughput = 20 * 1024, .blink_time = 170 },
	{ .throughput = 50 * 1024, .blink_time = 150 },
	{ .throughput = 70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time = 80 },
	{ .throughput = 300 * 1024, .blink_time = 50 },
};
159
/* Default legacy bitrate table: four CCK rates (1/2/5.5/11 Mbps)
 * followed by eight OFDM rates (6-54 Mbps). Rate values are in units
 * of 100 kbps; the first macro argument is the hardware rate index
 * (presumably MediaTek-specific — see the CCK_RATE/OFDM_RATE macro
 * definitions in mt76.h).
 * Callers that only support OFDM pass (mt76_rates + 4, n - 4).
 */
struct ieee80211_rate mt76_rates[] = {
	CCK_RATE(0, 10),
	CCK_RATE(1, 20),
	CCK_RATE(2, 55),
	CCK_RATE(3, 110),
	OFDM_RATE(11, 60),
	OFDM_RATE(15, 90),
	OFDM_RATE(10, 120),
	OFDM_RATE(14, 180),
	OFDM_RATE(9, 240),
	OFDM_RATE(13, 360),
	OFDM_RATE(8, 480),
	OFDM_RATE(12, 540),
};
EXPORT_SYMBOL_GPL(mt76_rates);
175
/* Frequency ranges (MHz) for per-range SAR power limits, covering
 * 2.4 GHz, the 5 GHz sub-bands and the 6 GHz sub-bands. Ranges are
 * contiguous within each band; indices match phy->frp entries.
 */
static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
	{ .start_freq = 2402, .end_freq = 2494, },
	{ .start_freq = 5150, .end_freq = 5350, },
	{ .start_freq = 5350, .end_freq = 5470, },
	{ .start_freq = 5470, .end_freq = 5725, },
	{ .start_freq = 5725, .end_freq = 5950, },
	{ .start_freq = 5945, .end_freq = 6165, },
	{ .start_freq = 6165, .end_freq = 6405, },
	{ .start_freq = 6405, .end_freq = 6525, },
	{ .start_freq = 6525, .end_freq = 6705, },
	{ .start_freq = 6705, .end_freq = 6865, },
	{ .start_freq = 6865, .end_freq = 7125, },
};
189
/* SAR capability advertised to cfg80211 (wiphy->sar_capa):
 * power-limit type over the ranges above.
 */
static const struct cfg80211_sar_capa mt76_sar_capa = {
	.type = NL80211_SAR_TYPE_POWER,
	.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
	.freq_ranges = &mt76_sar_freq_ranges[0],
};
195
mt76_led_init(struct mt76_phy * phy)196 static int mt76_led_init(struct mt76_phy *phy)
197 {
198 struct mt76_dev *dev = phy->dev;
199 struct ieee80211_hw *hw = phy->hw;
200 struct device_node *np = dev->dev->of_node;
201
202 if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
203 return 0;
204
205 np = of_get_child_by_name(np, "led");
206 if (np) {
207 if (!of_device_is_available(np)) {
208 of_node_put(np);
209 dev_info(dev->dev,
210 "led registration was explicitly disabled by dts\n");
211 return 0;
212 }
213
214 if (phy == &dev->phy) {
215 int led_pin;
216
217 if (!of_property_read_u32(np, "led-sources", &led_pin))
218 phy->leds.pin = led_pin;
219
220 phy->leds.al =
221 of_property_read_bool(np, "led-active-low");
222 }
223
224 of_node_put(np);
225 }
226
227 snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
228 wiphy_name(hw->wiphy));
229
230 phy->leds.cdev.name = phy->leds.name;
231 phy->leds.cdev.default_trigger =
232 ieee80211_create_tpt_led_trigger(hw,
233 IEEE80211_TPT_LEDTRIG_FL_RADIO,
234 mt76_tpt_blink,
235 ARRAY_SIZE(mt76_tpt_blink));
236
237 dev_info(dev->dev,
238 "registering led '%s'\n", phy->leds.name);
239
240 return led_classdev_register(dev->dev, &phy->leds.cdev);
241 }
242
mt76_led_cleanup(struct mt76_phy * phy)243 static void mt76_led_cleanup(struct mt76_phy *phy)
244 {
245 if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
246 return;
247
248 led_classdev_unregister(&phy->leds.cdev);
249 }
250
/* Derive per-stream HT (and optionally VHT) capabilities from the
 * phy's antenna mask: TX STBC needs >1 stream, and the HT/VHT MCS
 * maps advertise one rate set per available spatial stream.
 */
static void mt76_init_stream_cap(struct mt76_phy *phy,
				 struct ieee80211_supported_band *sband,
				 bool vht)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
	int nstream = hweight8(phy->antenna_mask);
	struct ieee80211_sta_vht_cap *vht_cap;
	u16 mcs_map = 0;
	int i;

	if (nstream > 1)
		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
	else
		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;

	/* one full MCS byte per stream, the remainder cleared */
	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
		u8 mask = 0;

		if (i < nstream)
			mask = 0xff;
		ht_cap->mcs.rx_mask[i] = mask;
	}

	if (!vht)
		return;

	vht_cap = &sband->vht_cap;
	if (nstream > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
	vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;

	/* MCS 0-9 for each available stream, "not supported" elsewhere */
	for (i = 0; i < 8; i++) {
		u16 map;

		if (i < nstream)
			map = IEEE80211_VHT_MCS_SUPPORT_0_9;
		else
			map = IEEE80211_VHT_MCS_NOT_SUPPORTED;
		mcs_map |= map << (i * 2);
	}
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
	if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
		vht_cap->vht_mcs.tx_highest |=
			cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
}
292
/* Refresh stream capabilities on every supported band.
 * 2.4 GHz never advertises VHT; 5/6 GHz follow the caller's choice.
 */
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
{
	bool has_2g = phy->cap.has_2ghz;
	bool has_5g = phy->cap.has_5ghz;
	bool has_6g = phy->cap.has_6ghz;

	if (has_2g)
		mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
	if (has_5g)
		mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
	if (has_6g)
		mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht);
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
303
304 static int
mt76_init_sband(struct mt76_phy * phy,struct mt76_sband * msband,const struct ieee80211_channel * chan,int n_chan,struct ieee80211_rate * rates,int n_rates,bool ht,bool vht)305 mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
306 const struct ieee80211_channel *chan, int n_chan,
307 struct ieee80211_rate *rates, int n_rates,
308 bool ht, bool vht)
309 {
310 struct ieee80211_supported_band *sband = &msband->sband;
311 struct ieee80211_sta_vht_cap *vht_cap;
312 struct ieee80211_sta_ht_cap *ht_cap;
313 struct mt76_dev *dev = phy->dev;
314 void *chanlist;
315 int size;
316
317 size = n_chan * sizeof(*chan);
318 chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
319 if (!chanlist)
320 return -ENOMEM;
321
322 msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
323 GFP_KERNEL);
324 if (!msband->chan)
325 return -ENOMEM;
326
327 sband->channels = chanlist;
328 sband->n_channels = n_chan;
329 sband->bitrates = rates;
330 sband->n_bitrates = n_rates;
331
332 if (!ht)
333 return 0;
334
335 ht_cap = &sband->ht_cap;
336 ht_cap->ht_supported = true;
337 ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
338 IEEE80211_HT_CAP_GRN_FLD |
339 IEEE80211_HT_CAP_SGI_20 |
340 IEEE80211_HT_CAP_SGI_40 |
341 (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
342
343 ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
344 ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
345
346 mt76_init_stream_cap(phy, sband, vht);
347
348 if (!vht)
349 return 0;
350
351 vht_cap = &sband->vht_cap;
352 vht_cap->vht_supported = true;
353 vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
354 IEEE80211_VHT_CAP_RXSTBC_1 |
355 IEEE80211_VHT_CAP_SHORT_GI_80 |
356 (3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
357
358 return 0;
359 }
360
361 static int
mt76_init_sband_2g(struct mt76_phy * phy,struct ieee80211_rate * rates,int n_rates)362 mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
363 int n_rates)
364 {
365 phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
366
367 return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
368 ARRAY_SIZE(mt76_channels_2ghz), rates,
369 n_rates, true, false);
370 }
371
372 static int
mt76_init_sband_5g(struct mt76_phy * phy,struct ieee80211_rate * rates,int n_rates,bool vht)373 mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
374 int n_rates, bool vht)
375 {
376 phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
377
378 return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
379 ARRAY_SIZE(mt76_channels_5ghz), rates,
380 n_rates, true, vht);
381 }
382
383 static int
mt76_init_sband_6g(struct mt76_phy * phy,struct ieee80211_rate * rates,int n_rates)384 mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
385 int n_rates)
386 {
387 phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;
388
389 return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
390 ARRAY_SIZE(mt76_channels_6ghz), rates,
391 n_rates, false, false);
392 }
393
394 static void
mt76_check_sband(struct mt76_phy * phy,struct mt76_sband * msband,enum nl80211_band band)395 mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
396 enum nl80211_band band)
397 {
398 struct ieee80211_supported_band *sband = &msband->sband;
399 bool found = false;
400 int i;
401
402 if (!sband)
403 return;
404
405 for (i = 0; i < sband->n_channels; i++) {
406 if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
407 continue;
408
409 found = true;
410 break;
411 }
412
413 if (found) {
414 cfg80211_chandef_create(&phy->chandef, &sband->channels[0],
415 NL80211_CHAN_HT20);
416 phy->chan_state = &msband->chan[0];
417 phy->dev->band_phys[band] = phy;
418 return;
419 }
420
421 sband->n_channels = 0;
422 if (phy->hw->wiphy->bands[band] == sband)
423 phy->hw->wiphy->bands[band] = NULL;
424 }
425
/* Common phy initialization shared by mt76_register_device() and
 * mt76_register_phy().
 *
 * The tx list/lock and remain-on-channel work are set up for every
 * phy. The remaining wiphy/hw configuration applies only to the
 * primary phy backing hw->priv; secondary radio phys sharing the same
 * ieee80211_hw return early.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
{
	struct mt76_dev *dev = phy->dev;
	struct wiphy *wiphy = hw->wiphy;

	INIT_LIST_HEAD(&phy->tx_list);
	spin_lock_init(&phy->tx_lock);
	INIT_DELAYED_WORK(&phy->roc_work, mt76_roc_complete_work);

	/* secondary radio phy sharing this hw: nothing more to do */
	if ((void *)phy != hw->priv)
		return 0;

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
			   NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
			WIPHY_FLAG_SUPPORTS_TDLS |
			WIPHY_FLAG_AP_UAPSD;

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);

	wiphy->available_antennas_tx = phy->antenna_mask;
	wiphy->available_antennas_rx = phy->antenna_mask;

	/* advertise SAR support and allocate per-range power limits */
	wiphy->sar_capa = &mt76_sar_capa;
	phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
				sizeof(struct mt76_freq_range_power),
				GFP_KERNEL);
	if (!phy->frp)
		return -ENOMEM;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;

	if (!hw->max_tx_fragments)
		hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
	ieee80211_hw_set(hw, SPECTRUM_MGMT);

	/* software A-MSDU only when the hw doesn't offload it */
	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD) &&
	    hw->max_tx_fragments > 1) {
		ieee80211_hw_set(hw, TX_AMSDU);
		ieee80211_hw_set(hw, TX_FRAG_LIST);
	}

	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);

	return 0;
}
489
490 struct mt76_phy *
mt76_alloc_radio_phy(struct mt76_dev * dev,unsigned int size,u8 band_idx)491 mt76_alloc_radio_phy(struct mt76_dev *dev, unsigned int size,
492 u8 band_idx)
493 {
494 struct ieee80211_hw *hw = dev->phy.hw;
495 unsigned int phy_size;
496 struct mt76_phy *phy;
497
498 phy_size = ALIGN(sizeof(*phy), 8);
499 phy = devm_kzalloc(dev->dev, size + phy_size, GFP_KERNEL);
500 if (!phy)
501 return NULL;
502
503 phy->dev = dev;
504 phy->hw = hw;
505 phy->priv = (void *)phy + phy_size;
506 phy->band_idx = band_idx;
507
508 return phy;
509 }
510 EXPORT_SYMBOL_GPL(mt76_alloc_radio_phy);
511
512 struct mt76_phy *
mt76_alloc_phy(struct mt76_dev * dev,unsigned int size,const struct ieee80211_ops * ops,u8 band_idx)513 mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
514 const struct ieee80211_ops *ops, u8 band_idx)
515 {
516 struct ieee80211_hw *hw;
517 unsigned int phy_size;
518 struct mt76_phy *phy;
519
520 phy_size = ALIGN(sizeof(*phy), 8);
521 hw = ieee80211_alloc_hw(size + phy_size, ops);
522 if (!hw)
523 return NULL;
524
525 phy = hw->priv;
526 phy->dev = dev;
527 phy->hw = hw;
528 phy->priv = hw->priv + phy_size;
529 phy->band_idx = band_idx;
530
531 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
532 hw->wiphy->interface_modes =
533 BIT(NL80211_IFTYPE_STATION) |
534 BIT(NL80211_IFTYPE_AP) |
535 #ifdef CONFIG_MAC80211_MESH
536 BIT(NL80211_IFTYPE_MESH_POINT) |
537 #endif
538 BIT(NL80211_IFTYPE_P2P_CLIENT) |
539 BIT(NL80211_IFTYPE_P2P_GO) |
540 BIT(NL80211_IFTYPE_ADHOC);
541
542 return phy;
543 }
544 EXPORT_SYMBOL_GPL(mt76_alloc_phy);
545
/* Register an additional phy with mac80211.
 *
 * The first four entries of @rates are CCK rates; the 5/6 GHz bands
 * skip them (rates + 4, n_rates - 4). On ieee80211_register_hw()
 * failure the LED class device registered by mt76_led_init() is
 * cleaned up again so it does not leak.
 */
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates)
{
	int ret;

	ret = mt76_phy_init(phy, phy->hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		/* skip the four CCK entries */
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	wiphy_read_of_freq_limits(phy->hw->wiphy);
	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	/* only register the hw for phys that own it */
	if ((void *)phy == phy->hw->priv) {
		ret = ieee80211_register_hw(phy->hw);
		if (ret)
			goto out_led_cleanup;
	}

	set_bit(MT76_STATE_REGISTERED, &phy->state);
	phy->dev->phys[phy->band_idx] = phy;

	return 0;

out_led_cleanup:
	/* don't leak the LED class device registered above */
	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(phy);
	return ret;
}
EXPORT_SYMBOL_GPL(mt76_register_phy);
596
/* Unregister a phy previously registered by mt76_register_phy().
 * Teardown order matters: LEDs first, then flush pending tx status,
 * then unregister the hw, and finally clear the phys[] slot.
 */
void mt76_unregister_phy(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;

	/* nothing to do if registration never completed */
	if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
		return;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(phy);
	mt76_tx_status_check(dev, true);
	ieee80211_unregister_hw(phy->hw);
	dev->phys[phy->band_idx] = NULL;
}
EXPORT_SYMBOL_GPL(mt76_unregister_phy);
611
/* Create a page pool for @q. Pools are only allocated for rx queues
 * and the WED tx-free queue; all other queues return 0 with no pool.
 *
 * Returns 0 on success (or when no pool is needed), otherwise the
 * page_pool_create() error with q->page_pool left NULL.
 */
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
{
	bool is_qrx = mt76_queue_is_rx(dev, q);
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.nid = NUMA_NO_NODE,
		.dev = dev->dma_dev,
	};
	/* -1 for non-rx queues (i.e. the WED tx-free queue) */
	int idx = is_qrx ? q - dev->q_rx : -1;

	/* Allocate page_pools just for rx/wed_tx_free queues */
	if (!is_qrx && !mt76_queue_is_wed_tx_free(q))
		return 0;

	/* data rx queues get a large pool; everything else (including
	 * idx == -1) falls through to the small default
	 */
	switch (idx) {
	case MT_RXQ_MAIN:
	case MT_RXQ_BAND1:
	case MT_RXQ_BAND2:
		pp_params.pool_size = 256;
		break;
	default:
		pp_params.pool_size = 16;
		break;
	}

	if (mt76_is_mmio(dev)) {
		/* rely on page_pool for DMA mapping */
		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
		pp_params.dma_dir = DMA_FROM_DEVICE;
		pp_params.max_len = PAGE_SIZE;
		pp_params.offset = 0;
		/* NAPI is available just for rx queues */
		if (idx >= 0 && idx < ARRAY_SIZE(dev->napi))
			pp_params.napi = &dev->napi[idx];
	}

	q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(q->page_pool)) {
		int err = PTR_ERR(q->page_pool);

		q->page_pool = NULL;
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_create_page_pool);
660
/* Allocate and initialize the core mt76_dev together with its primary
 * phy (band index MT_BAND0) and ieee80211_hw.
 *
 * @pdev:    parent struct device, also used as the default DMA device
 * @size:    driver-private data appended to the hw allocation
 * @ops:     mac80211 callbacks for the hw
 * @drv_ops: mt76 driver callbacks/flags
 *
 * Returns the new device or NULL on allocation failure; on failure all
 * partially allocated resources are released.
 */
struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
		  const struct ieee80211_ops *ops,
		  const struct mt76_driver_ops *drv_ops)
{
	struct ieee80211_hw *hw;
	struct mt76_phy *phy;
	struct mt76_dev *dev;
	int i;

	hw = ieee80211_alloc_hw(size, ops);
	if (!hw)
		return NULL;

	dev = hw->priv;
	dev->hw = hw;
	dev->dev = pdev;
	dev->drv = drv_ops;
	dev->dma_dev = pdev;

	/* the primary phy is embedded in the device */
	phy = &dev->phy;
	phy->dev = dev;
	phy->hw = hw;
	phy->band_idx = MT_BAND0;
	dev->phys[phy->band_idx] = phy;

	spin_lock_init(&dev->rx_lock);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->cc_lock);
	spin_lock_init(&dev->status_lock);
	spin_lock_init(&dev->wed_lock);
	mutex_init(&dev->mutex);
	init_waitqueue_head(&dev->tx_wait);

	skb_queue_head_init(&dev->mcu.res_q);
	init_waitqueue_head(&dev->mcu.wait);
	mutex_init(&dev->mcu.mutex);
	dev->tx_worker.fn = mt76_tx_worker;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	spin_lock_init(&dev->token_lock);
	idr_init(&dev->token);

	spin_lock_init(&dev->rx_token_lock);
	idr_init(&dev->rx_token);

	INIT_LIST_HEAD(&dev->wcid_list);
	INIT_LIST_HEAD(&dev->sta_poll_list);
	spin_lock_init(&dev->sta_poll_lock);

	INIT_LIST_HEAD(&dev->txwi_cache);
	INIT_LIST_HEAD(&dev->rxwi_cache);
	dev->token_size = dev->drv->token_size;
	INIT_DELAYED_WORK(&dev->scan_work, mt76_scan_work);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
		skb_queue_head_init(&dev->rx_skb[i]);

	dev->wq = alloc_ordered_workqueue("mt76", 0);
	if (!dev->wq) {
		ieee80211_free_hw(hw);
		return NULL;
	}

	return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);
738
/* Register the primary phy with mac80211 and start the tx worker.
 *
 * The first four entries of @rates are CCK rates; the 5/6 GHz bands
 * skip them (rates + 4, n_rates - 4). On ieee80211_register_hw()
 * failure the LED class device registered by mt76_led_init() is
 * cleaned up again so it does not leak.
 */
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates)
{
	struct ieee80211_hw *hw = dev->hw;
	struct mt76_phy *phy = &dev->phy;
	int ret;

	dev_set_drvdata(dev->dev, dev);
	mt76_wcid_init(&dev->global_wcid, phy->band_idx);
	ret = mt76_phy_init(phy, hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		/* skip the four CCK entries */
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	wiphy_read_of_freq_limits(hw->wiphy);
	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	ret = ieee80211_register_hw(hw);
	if (ret) {
		/* don't leak the LED class device registered above */
		if (IS_ENABLED(CONFIG_MT76_LEDS))
			mt76_led_cleanup(phy);
		return ret;
	}

	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
	set_bit(MT76_STATE_REGISTERED, &phy->state);
	sched_set_fifo_low(dev->tx_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_device);
792
/* Unregister the primary phy. Teardown order matters: LEDs first,
 * then flush pending tx status and the global wcid, and
 * ieee80211_unregister_hw() last.
 */
void mt76_unregister_device(struct mt76_dev *dev)
{
	struct ieee80211_hw *hw = dev->hw;

	/* nothing to do if mt76_register_device() never succeeded */
	if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
		return;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(&dev->phy);
	mt76_tx_status_check(dev, true);
	mt76_wcid_cleanup(dev, &dev->global_wcid);
	ieee80211_unregister_hw(hw);
}
EXPORT_SYMBOL_GPL(mt76_unregister_device);
807
/* Release everything allocated by mt76_alloc_device(); must run after
 * mt76_unregister_device().
 */
void mt76_free_device(struct mt76_dev *dev)
{
	mt76_worker_teardown(&dev->tx_worker);
	if (dev->wq) {
		destroy_workqueue(dev->wq);
		dev->wq = NULL;	/* guard against double teardown */
	}
	ieee80211_free_hw(dev->hw);
}
EXPORT_SYMBOL_GPL(mt76_free_device);
818
819 static struct mt76_phy *
mt76_vif_phy(struct ieee80211_hw * hw,struct ieee80211_vif * vif)820 mt76_vif_phy(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
821 {
822 struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
823 struct mt76_chanctx *ctx;
824
825 if (!hw->wiphy->n_radio)
826 return hw->priv;
827
828 if (!mlink->ctx)
829 return NULL;
830
831 ctx = (struct mt76_chanctx *)mlink->ctx->drv_priv;
832 return ctx->phy;
833 }
834
/* Finalize the A-MSDU currently being collected on rx queue @q and
 * hand it to the deferred rx path, after sanity-checking the first
 * subframe. Called once the burst is complete (see
 * mt76_rx_release_burst()).
 */
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
	struct sk_buff *skb = phy->rx_amsdu[q].head;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_dev *dev = phy->dev;

	/* detach the aggregation state before releasing */
	phy->rx_amsdu[q].head = NULL;
	phy->rx_amsdu[q].tail = NULL;

	/*
	 * Validate if the amsdu has a proper first subframe.
	 * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
	 * flag of the QoS header gets flipped. In such cases, the first
	 * subframe has a LLC/SNAP header in the location of the destination
	 * address.
	 */
	if (skb_shinfo(skb)->frag_list) {
		int offset = 0;

		if (!(status->flag & RX_FLAG_8023)) {
			offset = ieee80211_get_hdrlen_from_skb(skb);

			/* skip the IV of decrypted-but-not-stripped frames */
			if ((status->flag &
			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
			    RX_FLAG_DECRYPTED)
				offset += 8;
		}

		/* LLC/SNAP where the DA should be: drop the forged A-MSDU */
		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
			dev_kfree_skb(skb);
			return;
		}
	}
	__skb_queue_tail(&dev->rx_skb[q], skb);
}
870
/* Collect rx frames into per-queue A-MSDU chains. Subframes of the
 * same A-MSDU (matching sequence number) are linked into the head
 * skb's frag_list; anything that starts a new A-MSDU — or a plain
 * MSDU — flushes the previous chain via mt76_rx_release_amsdu().
 */
static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
				  struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;

	/* flush a pending chain that this frame does not belong to */
	if (phy->rx_amsdu[q].head &&
	    (!status->amsdu || status->first_amsdu ||
	     status->seqno != phy->rx_amsdu[q].seqno))
		mt76_rx_release_amsdu(phy, q);

	if (!phy->rx_amsdu[q].head) {
		/* start a new chain; tail points into the head's frag_list */
		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
		phy->rx_amsdu[q].seqno = status->seqno;
		phy->rx_amsdu[q].head = skb;
	} else {
		/* append the subframe */
		*phy->rx_amsdu[q].tail = skb;
		phy->rx_amsdu[q].tail = &skb->next;
	}

	/* single MSDU or last subframe: chain is complete */
	if (!status->amsdu || status->last_amsdu)
		mt76_rx_release_amsdu(phy, q);
}
893
/* Driver rx entry point: queue @skb for the deferred rx path.
 * Frames received while the phy is not running are dropped; testmode
 * rx statistics are updated when enabled.
 */
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);

	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
		dev_kfree_skb(skb);
		return;
	}

#ifdef CONFIG_NL80211_TESTMODE
	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
		phy->test.rx_stats.packets[q]++;
		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
			phy->test.rx_stats.fcs_error[q]++;
	}
#endif

	mt76_rx_release_burst(phy, q, skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);
915
mt76_has_tx_pending(struct mt76_phy * phy)916 bool mt76_has_tx_pending(struct mt76_phy *phy)
917 {
918 struct mt76_queue *q;
919 int i;
920
921 for (i = 0; i < __MT_TXQ_MAX; i++) {
922 q = phy->q_tx[i];
923 if (q && q->queued)
924 return true;
925 }
926
927 return false;
928 }
929 EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
930
931 static struct mt76_channel_state *
mt76_channel_state(struct mt76_phy * phy,struct ieee80211_channel * c)932 mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
933 {
934 struct mt76_sband *msband;
935 int idx;
936
937 if (c->band == NL80211_BAND_2GHZ)
938 msband = &phy->sband_2g;
939 else if (c->band == NL80211_BAND_6GHZ)
940 msband = &phy->sband_6g;
941 else
942 msband = &phy->sband_5g;
943
944 idx = c - &msband->sband.channels[0];
945 return &msband->chan[idx];
946 }
947
/* Credit the time elapsed since the last survey update to the current
 * channel's active counter (in microseconds) and restart the interval.
 */
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
{
	ktime_t delta = ktime_sub(time, phy->survey_time);

	phy->chan_state->cc_active += ktime_to_us(delta);
	phy->survey_time = time;
}
EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
957
mt76_update_survey(struct mt76_phy * phy)958 void mt76_update_survey(struct mt76_phy *phy)
959 {
960 struct mt76_dev *dev = phy->dev;
961 ktime_t cur_time;
962
963 if (dev->drv->update_survey)
964 dev->drv->update_survey(phy);
965
966 cur_time = ktime_get_boottime();
967 mt76_update_survey_active_time(phy, cur_time);
968
969 if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
970 struct mt76_channel_state *state = phy->chan_state;
971
972 spin_lock_bh(&dev->cc_lock);
973 state->cc_bss_rx += dev->cur_cc_bss_rx;
974 dev->cur_cc_bss_rx = 0;
975 spin_unlock_bh(&dev->cc_lock);
976 }
977 }
978 EXPORT_SYMBOL_GPL(mt76_update_survey);
979
/* Switch @phy to @chandef. Caller must hold dev->mutex (see
 * mt76_set_channel()).
 *
 * MT76_RESET is held and the tx worker disabled across the switch;
 * pending tx is drained (bounded by a 200ms timeout) and the survey
 * is updated before the old channel state is replaced.
 */
int __mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
		       bool offchannel)
{
	struct mt76_dev *dev = phy->dev;
	int timeout = HZ / 5;
	int ret;

	set_bit(MT76_RESET, &phy->state);

	mt76_worker_disable(&dev->tx_worker);
	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
	mt76_update_survey(phy);

	/* frequency or width change invalidates the cached DFS state */
	if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
	    phy->chandef.width != chandef->width)
		phy->dfs_state = MT_DFS_STATE_UNKNOWN;

	phy->chandef = *chandef;
	phy->chan_state = mt76_channel_state(phy, chandef->chan);
	phy->offchannel = offchannel;

	/* only on-channel switches update the operating chandef */
	if (!offchannel)
		phy->main_chandef = *chandef;

	/* reset survey counters when leaving the operating channel */
	if (chandef->chan != phy->main_chandef.chan)
		memset(phy->chan_state, 0, sizeof(*phy->chan_state));

	ret = dev->drv->set_channel(phy);

	clear_bit(MT76_RESET, &phy->state);
	mt76_worker_enable(&dev->tx_worker);
	mt76_worker_schedule(&dev->tx_worker);

	return ret;
}
1015
/* Locked wrapper around __mt76_set_channel(): stop the periodic MAC
 * work first, then perform the switch under dev->mutex.
 */
int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
		     bool offchannel)
{
	struct mt76_dev *dev = phy->dev;
	int ret;

	cancel_delayed_work_sync(&phy->mac_work);

	mutex_lock(&dev->mutex);
	ret = __mt76_set_channel(phy, chandef, offchannel);
	mutex_unlock(&dev->mutex);

	return ret;
}
1030
mt76_update_channel(struct mt76_phy * phy)1031 int mt76_update_channel(struct mt76_phy *phy)
1032 {
1033 struct ieee80211_hw *hw = phy->hw;
1034 struct cfg80211_chan_def *chandef = &hw->conf.chandef;
1035 bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
1036
1037 phy->radar_enabled = hw->conf.radar_enabled;
1038
1039 return mt76_set_channel(phy, chandef, offchannel);
1040 }
1041 EXPORT_SYMBOL_GPL(mt76_update_channel);
1042
1043 static struct mt76_sband *
mt76_get_survey_sband(struct mt76_phy * phy,int * idx)1044 mt76_get_survey_sband(struct mt76_phy *phy, int *idx)
1045 {
1046 if (*idx < phy->sband_2g.sband.n_channels)
1047 return &phy->sband_2g;
1048
1049 *idx -= phy->sband_2g.sband.n_channels;
1050 if (*idx < phy->sband_5g.sband.n_channels)
1051 return &phy->sband_5g;
1052
1053 *idx -= phy->sband_5g.sband.n_channels;
1054 if (*idx < phy->sband_6g.sband.n_channels)
1055 return &phy->sband_6g;
1056
1057 *idx -= phy->sband_6g.sband.n_channels;
1058 return NULL;
1059 }
1060
/* mac80211 get_survey callback: report statistics for the @idx-th
 * channel across all phys backing @hw.
 *
 * The loop walks every phy of the device, rebasing @idx through each
 * phy's bands (multi-radio wiphys span several phys). Returns -ENOENT
 * once @idx is past the last channel, which terminates mac80211's
 * survey iteration.
 */
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_sband *sband = NULL;
	struct ieee80211_channel *chan;
	struct mt76_channel_state *state;
	int phy_idx = 0;
	int ret = 0;

	mutex_lock(&dev->mutex);

	for (phy_idx = 0; phy_idx < ARRAY_SIZE(dev->phys); phy_idx++) {
		sband = NULL;
		phy = dev->phys[phy_idx];
		/* skip empty slots and phys belonging to other hws */
		if (!phy || phy->hw != hw)
			continue;

		/* rebases idx into this phy's bands; NULL if past them */
		sband = mt76_get_survey_sband(phy, &idx);

		/* refresh hw counters once, at the first channel */
		if (idx == 0 && phy->dev->drv->update_survey)
			mt76_update_survey(phy);

		/* single-radio wiphys only ever have one matching phy */
		if (sband || !hw->wiphy->n_radio)
			break;
	}

	if (!sband) {
		ret = -ENOENT;
		goto out;
	}

	chan = &sband->sband.channels[idx];
	state = mt76_channel_state(phy, chan);

	memset(survey, 0, sizeof(*survey));
	survey->channel = chan;
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	survey->filled |= dev->drv->survey_flags;
	if (state->noise)
		survey->filled |= SURVEY_INFO_NOISE_DBM;

	if (chan == phy->main_chandef.chan) {
		survey->filled |= SURVEY_INFO_IN_USE;

		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
	}

	/* counters are kept in usecs; survey wants msecs */
	survey->time_busy = div_u64(state->cc_busy, 1000);
	survey->time_rx = div_u64(state->cc_rx, 1000);
	survey->time = div_u64(state->cc_active, 1000);
	survey->noise = state->noise;

	/* cc_lock guards the software-accounted rx/tx airtime */
	spin_lock_bh(&dev->cc_lock);
	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
	survey->time_tx = div_u64(state->cc_tx, 1000);
	spin_unlock_bh(&dev->cc_lock);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);
1127
mt76_wcid_key_setup(struct mt76_dev * dev,struct mt76_wcid * wcid,struct ieee80211_key_conf * key)1128 void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
1129 struct ieee80211_key_conf *key)
1130 {
1131 struct ieee80211_key_seq seq;
1132 int i;
1133
1134 wcid->rx_check_pn = false;
1135
1136 if (!key)
1137 return;
1138
1139 if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
1140 return;
1141
1142 wcid->rx_check_pn = true;
1143
1144 /* data frame */
1145 for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
1146 ieee80211_get_key_rx_seq(key, i, &seq);
1147 memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
1148 }
1149
1150 /* robust management frame */
1151 ieee80211_get_key_rx_seq(key, -1, &seq);
1152 memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
1153
1154 }
1155 EXPORT_SYMBOL(mt76_wcid_key_setup);
1156
/* Combine per-chain RSSI readings into one signal estimate.
 * Chains flagged in @chain_mask contribute; positive (invalid) readings
 * are ignored.  Each additional chain adds a bonus on top of the
 * strongest value seen so far: +3 dB when equal, +2 dB within 2 dB,
 * +1 dB within 6 dB, nothing beyond that.  Returns -128 when no chain
 * provided a usable value.
 */
int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
{
	int combined = -128;
	int i;

	for (i = 0; chain_mask >> i; i++) {
		int cur = chain_signal[i];
		int diff;

		if (!(chain_mask & BIT(i)) || cur > 0)
			continue;

		/* keep the stronger value in 'combined' */
		if (combined < cur)
			swap(combined, cur);

		diff = combined - cur;
		if (diff == 0)
			combined += 3;
		else if (diff <= 2)
			combined += 2;
		else if (diff <= 6)
			combined += 1;
	}

	return combined;
}
EXPORT_SYMBOL(mt76_rx_signal);
1185
/* Translate the driver-private mt76_rx_status stashed in skb->cb into
 * the mac80211 ieee80211_rx_status occupying the same cb area, and
 * resolve the destination hw and station for the frame.
 */
static void
mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
		struct ieee80211_hw **hw,
		struct ieee80211_sta **sta)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct mt76_rx_status mstat;

	/* both structs alias skb->cb: copy out before clearing */
	mstat = *((struct mt76_rx_status *)skb->cb);
	memset(status, 0, sizeof(*status));

	status->flag = mstat.flag;
	status->freq = mstat.freq;
	status->enc_flags = mstat.enc_flags;
	status->encoding = mstat.encoding;
	status->bw = mstat.bw;
	/* EHT rate info shares storage with the HE fields */
	if (status->encoding == RX_ENC_EHT) {
		status->eht.ru = mstat.eht.ru;
		status->eht.gi = mstat.eht.gi;
	} else {
		status->he_ru = mstat.he_ru;
		status->he_gi = mstat.he_gi;
		status->he_dcm = mstat.he_dcm;
	}
	status->rate_idx = mstat.rate_idx;
	status->nss = mstat.nss;
	status->band = mstat.band;
	status->signal = mstat.signal;
	status->chains = mstat.chains;
	status->ampdu_reference = mstat.ampdu_ref;
	status->device_timestamp = mstat.timestamp;
	status->mactime = mstat.timestamp;
	/* recompute the combined signal from the per-chain values,
	 * overriding the raw mstat.signal copied above
	 */
	status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
	if (status->signal <= -128)
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	/* timestamp beacons/probe responses for scan result aging */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		status->boottime_ns = ktime_get_boottime_ns();

	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
	BUILD_BUG_ON(sizeof(status->chain_signal) !=
		     sizeof(mstat.chain_signal));
	memcpy(status->chain_signal, mstat.chain_signal,
	       sizeof(mstat.chain_signal));

	if (mstat.wcid) {
		status->link_valid = mstat.wcid->link_valid;
		status->link_id = mstat.wcid->link_id;
	}

	*sta = wcid_to_sta(mstat.wcid);
	*hw = mt76_phy_hw(dev, mstat.phy_idx);
}
1241
/* CCMP replay detection: compare the frame's packet number against the
 * last PN accepted for this station/TID.  Frames with a stale or
 * repeated PN are downgraded to monitor-only delivery instead of being
 * handed to the stack as valid traffic.
 */
static void
mt76_check_ccmp_pn(struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_hdr *hdr;
	int security_idx;
	int ret;

	if (!(status->flag & RX_FLAG_DECRYPTED))
		return;

	if (status->flag & RX_FLAG_ONLY_MONITOR)
		return;

	/* PN checking is opt-in per station (see mt76_wcid_key_setup) */
	if (!wcid || !wcid->rx_check_pn)
		return;

	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	if (status->flag & RX_FLAG_8023)
		goto skip_hdr_check;

	hdr = mt76_skb_get_hdr(skb);
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211
		 * All further fragments will be validated by mac80211 only.
		 */
		if (ieee80211_is_frag(hdr) &&
		    !ieee80211_is_first_frag(hdr->frame_control))
			return;
	}

	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
	 *
	 * the recipient shall maintain a single replay counter for received
	 * individually addressed robust Management frames that are received
	 * with the To DS subfield equal to 0, [...]
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_has_tods(hdr->frame_control))
		security_idx = IEEE80211_NUM_TIDS;

skip_hdr_check:
	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	/* byte-wise compare; a new PN must be strictly greater than the
	 * stored one (presumably stored most-significant byte first so
	 * memcmp ordering matches numeric ordering — per the CCMP spec)
	 */
	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
		     sizeof(status->iv));
	if (ret <= 0) {
		/* replayed or stale PN: deliver to monitor interfaces only */
		status->flag |= RX_FLAG_ONLY_MONITOR;
		return;
	}

	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));

	if (status->flag & RX_FLAG_IV_STRIPPED)
		status->flag |= RX_FLAG_PN_VALIDATED;
}
1299
/* Estimate the RX airtime of a frame (or aggregate) of @len bytes from
 * its rate info, account it to the BSS RX airtime counter, and — when the
 * frame belongs to a known station — register it with mac80211's
 * per-station airtime fairness accounting.
 */
static void
mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
		    int len)
{
	struct mt76_wcid *wcid = status->wcid;
	/* minimal rx_status carrying just the rate fields the airtime
	 * calculation needs
	 */
	struct ieee80211_rx_status info = {
		.enc_flags = status->enc_flags,
		.rate_idx = status->rate_idx,
		.encoding = status->encoding,
		.band = status->band,
		.nss = status->nss,
		.bw = status->bw,
	};
	struct ieee80211_sta *sta;
	u32 airtime;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;

	airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
	spin_lock(&dev->cc_lock);
	dev->cur_cc_bss_rx += airtime;
	spin_unlock(&dev->cc_lock);

	/* per-station accounting only applies to associated stations */
	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
}
1328
/* Flush the accumulated A-MPDU airtime state: re-resolve the station by
 * index (it may have been removed since the aggregate started), report
 * the aggregate's total length, then reset the accumulator.
 */
static void
mt76_airtime_flush_ampdu(struct mt76_dev *dev)
{
	struct mt76_wcid *wcid;
	int wcid_idx;

	if (!dev->rx_ampdu_len)
		return;

	wcid_idx = dev->rx_ampdu_status.wcid_idx;
	if (wcid_idx < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[wcid_idx]);
	else
		wcid = NULL;	/* 0xff sentinel: no station attached */
	dev->rx_ampdu_status.wcid = wcid;

	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);

	dev->rx_ampdu_len = 0;
	dev->rx_ampdu_ref = 0;
}
1350
/* Per-frame hook of the software RX airtime accounting.  Frames that are
 * part of an A-MPDU are accumulated by length and reported once per
 * aggregate (keyed by ampdu_ref); everything else is reported
 * immediately.
 */
static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;

	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
		return;

	if (!wcid || !wcid->sta) {
		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

		/* no 802.11 header to inspect for 802.3-decapped frames */
		if (status->flag & RX_FLAG_8023)
			return;

		/* unknown sender: only count frames addressed to us */
		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
			return;

		wcid = NULL;
	}

	/* a non-aggregated frame or a new aggregate closes out any
	 * pending A-MPDU accumulation first
	 */
	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
	    status->ampdu_ref != dev->rx_ampdu_ref)
		mt76_airtime_flush_ampdu(dev);

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		if (!dev->rx_ampdu_len ||
		    status->ampdu_ref != dev->rx_ampdu_ref) {
			/* first subframe: snapshot the rate info and
			 * remember the station by index (0xff = none)
			 */
			dev->rx_ampdu_status = *status;
			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
			dev->rx_ampdu_ref = status->ampdu_ref;
		}

		dev->rx_ampdu_len += skb->len;
		return;
	}

	mt76_airtime_report(dev, status, skb->len);
}
1390
/* Per-frame station bookkeeping on the RX path: resolve the station for
 * PS-Poll frames, feed airtime accounting, update RSSI/inactivity state,
 * and drive software powersave transitions for stations flagged with
 * MT_WCID_FLAG_CHECK_PS.
 */
static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct mt76_wcid *wcid = status->wcid;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	bool ps;

	hw = mt76_phy_hw(dev, status->phy_idx);
	/* PS-Poll may arrive without a resolved wcid; look the station up
	 * by transmitter address
	 */
	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
	    !(status->flag & RX_FLAG_8023)) {
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
		if (sta)
			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
	}

	mt76_airtime_check(dev, skb);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);

	/* rssi EWMA stores the magnitude of the (non-positive) signal */
	if (status->signal <= 0)
		ewma_signal_add(&wcid->rssi, -status->signal);

	wcid->inactive_count = 0;

	/* the PS logic below needs the 802.11 header */
	if (status->flag & RX_FLAG_8023)
		return;

	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	/* only complete mgmt/data frames are reliable PM-bit sources */
	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    !(ieee80211_is_mgmt(hdr->frame_control) ||
	      ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, tidno);

	/* no state change */
	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	/* flag ordering: set before notifying the driver on entry,
	 * clear after notifying on exit
	 */
	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);

	if (dev->drv->sta_ps)
		dev->drv->sta_ps(dev, sta, ps);

	if (!ps)
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	ieee80211_sta_ps_transition(sta, ps);
}
1458
/* Hand a batch of fully-processed RX frames to mac80211.  Frames are
 * converted to mac80211 rx_status form, PN-checked, and collected on a
 * local list; delivery then goes through GRO when a NAPI context is
 * available, or plain netif_receive_skb_list() otherwise.
 */
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi)
{
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct sk_buff *skb, *tmp;
	LIST_HEAD(list);

	spin_lock(&dev->rx_lock);
	while ((skb = __skb_dequeue(frames)) != NULL) {
		/* A-MSDU subframes were chained on frag_list; detach them
		 * and deliver each one individually below
		 */
		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

		mt76_check_ccmp_pn(skb);
		skb_shinfo(skb)->frag_list = NULL;
		mt76_rx_convert(dev, skb, &hw, &sta);
		ieee80211_rx_list(hw, sta, skb, &list);

		/* subsequent amsdu frames */
		while (nskb) {
			skb = nskb;
			nskb = nskb->next;
			skb->next = NULL;

			mt76_rx_convert(dev, skb, &hw, &sta);
			ieee80211_rx_list(hw, sta, skb, &list);
		}
	}
	spin_unlock(&dev->rx_lock);

	if (!napi) {
		netif_receive_skb_list(&list);
		return;
	}

	list_for_each_entry_safe(skb, tmp, &list, list) {
		skb_list_del_init(skb);
		napi_gro_receive(napi, skb);
	}
}
1498
mt76_rx_poll_complete(struct mt76_dev * dev,enum mt76_rxq_id q,struct napi_struct * napi)1499 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
1500 struct napi_struct *napi)
1501 {
1502 struct sk_buff_head frames;
1503 struct sk_buff *skb;
1504
1505 __skb_queue_head_init(&frames);
1506
1507 while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
1508 mt76_check_sta(dev, skb);
1509 if (mtk_wed_device_active(&dev->mmio.wed))
1510 __skb_queue_tail(&frames, skb);
1511 else
1512 mt76_rx_aggr_reorder(skb, &frames);
1513 }
1514
1515 mt76_rx_complete(dev, &frames, napi);
1516 }
1517 EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
1518
/* Add a station: let the driver allocate its state, bind the station's
 * TX queues to the new wcid index, then publish the wcid entry.
 * Returns 0 or the driver's sta_add() error.
 */
static int
mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
	     struct ieee80211_sta *sta)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct mt76_dev *dev = phy->dev;
	int ret;
	int i;

	mutex_lock(&dev->mutex);

	ret = dev->drv->sta_add(dev, vif, sta);
	if (ret)
		goto out;

	/* point every TX queue of the station at its wcid index */
	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct mt76_txq *mtxq;

		if (!sta->txq[i])
			continue;

		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
		mtxq->wcid = wcid->idx;
	}

	ewma_signal_init(&wcid->rssi);
	/* publish only after the driver state is fully set up */
	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
	phy->num_sta++;

	mt76_wcid_init(wcid, phy->band_idx);
out:
	mutex_unlock(&dev->mutex);

	return ret;
}
1554
/* Tear down a station: stop all RX aggregation sessions, let the driver
 * release its state, flush pending TX, and free the wcid index.
 * Caller must hold dev->mutex (see mt76_sta_remove()).
 */
void __mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	int i, idx = wcid->idx;

	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
		mt76_rx_aggr_stop(dev, wcid, i);

	if (dev->drv->sta_remove)
		dev->drv->sta_remove(dev, vif, sta);

	mt76_wcid_cleanup(dev, wcid);

	/* release the index for reuse */
	mt76_wcid_mask_clear(dev->wcid_mask, idx);
	phy->num_sta--;
}
EXPORT_SYMBOL_GPL(__mt76_sta_remove);
1574
/* Locked wrapper around __mt76_sta_remove() */
static void
mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
		struct ieee80211_sta *sta)
{
	struct mt76_dev *dev = phy->dev;

	mutex_lock(&dev->mutex);
	__mt76_sta_remove(phy, vif, sta);
	mutex_unlock(&dev->mutex);
}
1585
/* ieee80211_ops::sta_state implementation.  Handles station add/remove
 * on the NOTEXIST<->NONE transitions directly, and forwards the
 * assoc/authorize/disassoc transitions to the driver's sta_event hook
 * when one is provided.
 */
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	enum mt76_sta_event ev;

	/* operate on the phy the vif is actually bound to */
	phy = mt76_vif_phy(hw, vif);
	if (!phy)
		return -EINVAL;

	if (old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE)
		return mt76_sta_add(phy, vif, sta);

	if (old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST)
		mt76_sta_remove(phy, vif, sta);

	if (!dev->drv->sta_event)
		return 0;

	/* map the remaining state transitions onto driver events */
	if (old_state == IEEE80211_STA_AUTH &&
	    new_state == IEEE80211_STA_ASSOC)
		ev = MT76_STA_EVENT_ASSOC;
	else if (old_state == IEEE80211_STA_ASSOC &&
		 new_state == IEEE80211_STA_AUTHORIZED)
		ev = MT76_STA_EVENT_AUTHORIZE;
	else if (old_state == IEEE80211_STA_ASSOC &&
		 new_state == IEEE80211_STA_AUTH)
		ev = MT76_STA_EVENT_DISASSOC;
	else
		return 0;

	return dev->drv->sta_event(dev, vif, sta, ev);
}
EXPORT_SYMBOL_GPL(mt76_sta_state);
1625
/* Unpublish the station's wcid entry before mac80211 frees the station
 * under RCU, so no new lookups can return it.  status_lock is held
 * across the pointer clear to serialize against TX status processing.
 */
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;

	mutex_lock(&dev->mutex);
	spin_lock_bh(&dev->status_lock);
	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
	spin_unlock_bh(&dev->status_lock);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
1640
/* Initialize a freshly allocated wcid entry: empty its list heads and
 * TX queues, set up the packet-id IDR, and bind it to @band_idx.
 */
void mt76_wcid_init(struct mt76_wcid *wcid, u8 band_idx)
{
	INIT_LIST_HEAD(&wcid->tx_list);
	INIT_LIST_HEAD(&wcid->list);
	INIT_LIST_HEAD(&wcid->poll_list);

	skb_queue_head_init(&wcid->tx_pending);
	skb_queue_head_init(&wcid->tx_offchannel);

	idr_init(&wcid->pktid);

	wcid->hw_key_idx = -1;	/* no hardware key slot assigned yet */
	wcid->phy_idx = band_idx;
}
EXPORT_SYMBOL_GPL(mt76_wcid_init);
1656
/* Release all TX state attached to a wcid: reclaim skbs waiting for TX
 * status, destroy the packet-id IDR, pull the wcid off the phy's TX
 * scheduling list, and free every still-pending TX skb.
 */
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	struct mt76_phy *phy = mt76_dev_phy(dev, wcid->phy_idx);
	struct ieee80211_hw *hw;
	struct sk_buff_head list;
	struct sk_buff *skb;

	/* collect skbs still awaiting TX status onto 'list' */
	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, -1, &list);
	mt76_tx_status_unlock(dev, &list);

	idr_destroy(&wcid->pktid);

	spin_lock_bh(&phy->tx_lock);

	if (!list_empty(&wcid->tx_list))
		list_del_init(&wcid->tx_list);

	/* splice pending TX onto the same list, under the queue's own
	 * lock nested inside tx_lock
	 */
	spin_lock(&wcid->tx_pending.lock);
	skb_queue_splice_tail_init(&wcid->tx_pending, &list);
	spin_unlock(&wcid->tx_pending.lock);

	spin_unlock_bh(&phy->tx_lock);

	while ((skb = __skb_dequeue(&list)) != NULL) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_free_txskb(hw, skb);
	}
}
EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);
1687
/* Queue a wcid for stats polling, unless it is already queued or an MCU
 * reset is in progress.
 */
void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	if (test_bit(MT76_MCU_RESET, &dev->phy.state))
		return;

	spin_lock_bh(&dev->sta_poll_lock);
	if (list_empty(&wcid->poll_list))
		list_add_tail(&wcid->poll_list, &dev->sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);
}
EXPORT_SYMBOL_GPL(mt76_wcid_add_poll);
1699
mt76_get_txpower(struct ieee80211_hw * hw,struct ieee80211_vif * vif,unsigned int link_id,int * dbm)1700 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1701 unsigned int link_id, int *dbm)
1702 {
1703 struct mt76_phy *phy = mt76_vif_phy(hw, vif);
1704 int n_chains, delta;
1705
1706 if (!phy)
1707 return -EINVAL;
1708
1709 n_chains = hweight16(phy->chainmask);
1710 delta = mt76_tx_power_nss_delta(n_chains);
1711 *dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
1712
1713 return 0;
1714 }
1715 EXPORT_SYMBOL_GPL(mt76_get_txpower);
1716
/* Store user-supplied SAR power limits into the phy's per-frequency-range
 * table.  Returns -EINVAL for unsupported SAR types or empty specs.
 */
int mt76_init_sar_power(struct ieee80211_hw *hw,
			const struct cfg80211_sar_specs *sar)
{
	struct mt76_phy *phy = hw->priv;
	const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
	int i;

	if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
		return -EINVAL;

	for (i = 0; i < sar->num_sub_specs; i++) {
		/* NOTE(review): freq_range_index is used to index both
		 * capa->freq_ranges and phy->frp without a bound check —
		 * presumably cfg80211 validates it against the advertised
		 * sar_capa before calling; confirm.
		 */
		u32 index = sar->sub_specs[i].freq_range_index;
		/* SAR specifies power limitaton in 0.25dbm */
		s32 power = sar->sub_specs[i].power >> 1;

		/* clamp out-of-range values (both ends map to the max) */
		if (power > 127 || power < -127)
			power = 127;

		phy->frp[index].range = &capa->freq_ranges[index];
		phy->frp[index].power = power;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_init_sar_power);
1742
mt76_get_sar_power(struct mt76_phy * phy,struct ieee80211_channel * chan,int power)1743 int mt76_get_sar_power(struct mt76_phy *phy,
1744 struct ieee80211_channel *chan,
1745 int power)
1746 {
1747 const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
1748 int freq, i;
1749
1750 if (!capa || !phy->frp)
1751 return power;
1752
1753 if (power > 127 || power < -127)
1754 power = 127;
1755
1756 freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
1757 for (i = 0 ; i < capa->num_freq_ranges; i++) {
1758 if (phy->frp[i].range &&
1759 freq >= phy->frp[i].range->start_freq &&
1760 freq < phy->frp[i].range->end_freq) {
1761 power = min_t(int, phy->frp[i].power, power);
1762 break;
1763 }
1764 }
1765
1766 return power;
1767 }
1768 EXPORT_SYMBOL_GPL(mt76_get_sar_power);
1769
/* Per-interface iterator body: complete the channel switch on every vif
 * whose CSA countdown has finished.
 */
static void
__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif, 0))
		ieee80211_csa_finish(vif, 0);
}
1776
/* Finalize a pending channel-switch announcement: once csa_complete was
 * set (by mt76_csa_check()), finish the CSA on all active interfaces and
 * clear the flag.
 */
void mt76_csa_finish(struct mt76_dev *dev)
{
	if (!dev->csa_complete)
		return;

	ieee80211_iterate_active_interfaces_atomic(dev->hw,
			IEEE80211_IFACE_ITER_RESUME_ALL,
			__mt76_csa_finish, dev);

	dev->csa_complete = 0;
}
EXPORT_SYMBOL_GPL(mt76_csa_finish);
1789
/* Per-interface iterator body: mark the device's CSA as complete when
 * any vif's beacon countdown has reached zero.
 */
static void
__mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct mt76_dev *dev = priv;

	if (!vif->bss_conf.csa_active)
		return;

	dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif, 0);
}
1800
/* Poll all active interfaces for a finished CSA beacon countdown and
 * latch the result into dev->csa_complete.
 */
void mt76_csa_check(struct mt76_dev *dev)
{
	ieee80211_iterate_active_interfaces_atomic(dev->hw,
			IEEE80211_IFACE_ITER_RESUME_ALL,
			__mt76_csa_check, dev);
}
EXPORT_SYMBOL_GPL(mt76_csa_check);
1808
/* ieee80211_ops::set_tim callback: intentionally a no-op that always
 * succeeds — provided only so mac80211 sees the operation as implemented.
 */
int
mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
	return 0;
}
EXPORT_SYMBOL_GPL(mt76_set_tim);
1815
/* Re-insert the 8-byte CCMP header that the hardware stripped on RX,
 * rebuilding it from the PN saved in status->iv, so mac80211 sees a
 * frame with an intact IV (RX_FLAG_IV_STRIPPED is cleared accordingly).
 */
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	u8 *hdr, *pn = status->iv;

	/* grow the skb head by 8 bytes and slide the 802.11 header down,
	 * opening a gap right after it for the CCMP header
	 */
	__skb_push(skb, 8);
	memmove(skb->data, skb->data + 8, hdr_len);
	hdr = skb->data + hdr_len;

	/* CCMP header layout: PN0, PN1, rsvd, flags(ExtIV|KeyID), PN2..PN5;
	 * status->iv holds the PN with pn[0] as the most significant byte
	 */
	hdr[0] = pn[5];
	hdr[1] = pn[4];
	hdr[2] = 0;
	hdr[3] = 0x20 | (key_id << 6);	/* ExtIV bit + key id */
	hdr[4] = pn[3];
	hdr[5] = pn[2];
	hdr[6] = pn[1];
	hdr[7] = pn[0];

	status->flag &= ~RX_FLAG_IV_STRIPPED;
}
EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
1838
/* Map a hardware rate index onto the bitrate table index of @sband.
 * CCK rates only exist on 2 GHz (the short-preamble bit is masked off);
 * OFDM lookups on 2 GHz skip the four leading CCK entries.  Returns 0
 * when no entry matches.
 */
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck)
{
	bool band_2g = sband->band == NL80211_BAND_2GHZ;
	int start = 0;
	int i;

	if (cck) {
		if (!band_2g)
			return 0;

		idx &= ~BIT(2); /* strip the short-preamble flag */
	} else if (band_2g) {
		start = 4; /* OFDM entries begin after the CCK rates */
	}

	for (i = start; i < sband->n_bitrates; i++)
		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
			return i;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_rate);
1863
/* ieee80211_ops::sw_scan_start callback: flag the phy as scanning so
 * other paths (e.g. DFS state) can adapt.
 */
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac)
{
	struct mt76_phy *phy = hw->priv;

	set_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan);
1872
/* ieee80211_ops::sw_scan_complete callback: clear the scanning flag */
void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt76_phy *phy = hw->priv;

	clear_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
1880
/* ieee80211_ops::get_antenna implementation: report the union of the
 * chain masks of all phys registered on this hw; TX and RX masks are
 * reported identically.
 */
int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	int i;

	mutex_lock(&dev->mutex);
	*tx_ant = 0;
	for (i = 0; i < ARRAY_SIZE(dev->phys); i++)
		if (dev->phys[i] && dev->phys[i]->hw == hw)
			*tx_ant |= dev->phys[i]->chainmask;
	*rx_ant = *tx_ant;
	mutex_unlock(&dev->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_antenna);
1898
1899 struct mt76_queue *
mt76_init_queue(struct mt76_dev * dev,int qid,int idx,int n_desc,int ring_base,void * wed,u32 flags)1900 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
1901 int ring_base, void *wed, u32 flags)
1902 {
1903 struct mt76_queue *hwq;
1904 int err;
1905
1906 hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
1907 if (!hwq)
1908 return ERR_PTR(-ENOMEM);
1909
1910 hwq->flags = flags;
1911 hwq->wed = wed;
1912
1913 err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
1914 if (err < 0)
1915 return ERR_PTR(err);
1916
1917 return hwq;
1918 }
1919 EXPORT_SYMBOL_GPL(mt76_init_queue);
1920
/* Accumulate one station's TX statistics into the ethtool data array.
 * The write order here must stay in sync with the corresponding stat
 * string table defined elsewhere; worker_stat_count records how many
 * slots were written starting at initial_stat_idx.
 */
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats, bool eht)
{
	int i, ei = wi->initial_stat_idx;
	u64 *data = wi->data;

	wi->sta_count++;

	/* per-PHY-mode TX counters, fixed order */
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];
	if (eht) {
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_SU];
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_TRIG];
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_MU];
	}

	/* the widest bandwidth slot only exists for EHT */
	for (i = 0; i < (ARRAY_SIZE(stats->tx_bw) - !eht); i++)
		data[ei++] += stats->tx_bw[i];

	/* 14 MCS entries for EHT, 12 otherwise */
	for (i = 0; i < (eht ? 14 : 12); i++)
		data[ei++] += stats->tx_mcs[i];

	for (i = 0; i < 4; i++)
		data[ei++] += stats->tx_nss[i];

	wi->worker_stat_count = ei - wi->initial_stat_idx;
}
EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
1956
/* Aggregate page-pool statistics from all RX queues into the ethtool
 * data array and advance *index past the written slots.  A no-op (and
 * *index is left untouched) when CONFIG_PAGE_POOL_STATS is disabled.
 */
void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
{
#ifdef CONFIG_PAGE_POOL_STATS
	struct page_pool_stats stats = {};
	int i;

	mt76_for_each_q_rx(dev, i)
		page_pool_get_stats(dev->q_rx[i].page_pool, &stats);

	page_pool_ethtool_stats_get(data, &stats);
	*index += page_pool_ethtool_stats_get_count();
#endif
}
EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
1971
/* Derive the phy's current DFS state from the regulatory region, radar
 * configuration, and the channel in use.
 */
enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
{
	struct ieee80211_hw *hw = phy->hw;
	struct mt76_dev *dev = phy->dev;

	/* DFS is irrelevant without a region or while scanning */
	if (dev->region == NL80211_DFS_UNSET ||
	    test_bit(MT76_SCANNING, &phy->state))
		return MT_DFS_STATE_DISABLED;

	if (!phy->radar_enabled) {
		/* monitor mode on a radar channel still counts as active */
		if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
		    (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
			return MT_DFS_STATE_ACTIVE;

		return MT_DFS_STATE_DISABLED;
	}

	/* not yet cleared to beacon on this channel -> still in CAC */
	if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
		return MT_DFS_STATE_CAC;

	return MT_DFS_STATE_ACTIVE;
}
EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
1995
/* Tear down vif-level state on interface removal: unpublish the primary
 * link pointer and abort any scan or remain-on-channel operation that
 * may still reference this vif.
 */
void mt76_vif_cleanup(struct mt76_dev *dev, struct ieee80211_vif *vif)
{
	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
	struct mt76_vif_data *mvif = mlink->mvif;

	rcu_assign_pointer(mvif->link[0], NULL);
	mt76_abort_scan(dev);
	if (mvif->roc_phy)
		mt76_abort_roc(mvif->roc_phy);
}
EXPORT_SYMBOL_GPL(mt76_vif_cleanup);
2006 EXPORT_SYMBOL_GPL(mt76_vif_cleanup);
2007