1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2019-2020 Realtek Corporation
3 */
4
5 #include <linux/if_arp.h>
6 #include "cam.h"
7 #include "chan.h"
8 #include "coex.h"
9 #include "debug.h"
10 #include "fw.h"
11 #include "mac.h"
12 #include "phy.h"
13 #include "ps.h"
14 #include "reg.h"
15 #include "util.h"
16 #include "wow.h"
17
18 struct rtw89_eapol_2_of_2 {
19 u8 gtkbody[14];
20 u8 key_des_ver;
21 u8 rsvd[92];
22 } __packed;
23
24 struct rtw89_sa_query {
25 u8 category;
26 u8 action;
27 } __packed;
28
29 struct rtw89_arp_rsp {
30 u8 llc_hdr[sizeof(rfc1042_header)];
31 __be16 llc_type;
32 struct arphdr arp_hdr;
33 u8 sender_hw[ETH_ALEN];
34 __be32 sender_ip;
35 u8 target_hw[ETH_ALEN];
36 __be32 target_ip;
37 } __packed;
38
39 static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C};
40
41 union rtw89_fw_element_arg {
42 size_t offset;
43 enum rtw89_rf_path rf_path;
44 enum rtw89_fw_type fw_type;
45 };
46
47 struct rtw89_fw_element_handler {
48 int (*fn)(struct rtw89_dev *rtwdev,
49 const struct rtw89_fw_element_hdr *elm,
50 const union rtw89_fw_element_arg arg);
51 const union rtw89_fw_element_arg arg;
52 const char *name;
53 };
54
55 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
56 struct sk_buff *skb);
57 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
58 struct rtw89_wait_info *wait, unsigned int cond);
59 static int __parse_security_section(struct rtw89_dev *rtwdev,
60 struct rtw89_fw_bin_info *info,
61 struct rtw89_fw_hdr_section_info *section_info,
62 const void *content,
63 u32 *mssc_len);
64
rtw89_fw_h2c_alloc_skb(struct rtw89_dev * rtwdev,u32 len,bool header)65 static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
66 bool header)
67 {
68 struct sk_buff *skb;
69 u32 header_len = 0;
70 u32 h2c_desc_size = rtwdev->chip->h2c_desc_size;
71
72 if (header)
73 header_len = H2C_HEADER_LEN;
74
75 skb = dev_alloc_skb(len + header_len + h2c_desc_size);
76 if (!skb)
77 return NULL;
78 skb_reserve(skb, header_len + h2c_desc_size);
79 memset(skb->data, 0, len);
80
81 return skb;
82 }
83
rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev * rtwdev,u32 len)84 struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len)
85 {
86 return rtw89_fw_h2c_alloc_skb(rtwdev, len, true);
87 }
88
rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev * rtwdev,u32 len)89 struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len)
90 {
91 return rtw89_fw_h2c_alloc_skb(rtwdev, len, false);
92 }
93
rtw89_fw_check_rdy(struct rtw89_dev * rtwdev,enum rtw89_fwdl_check_type type)94 int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type)
95 {
96 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
97 u8 val;
98 int ret;
99
100 ret = read_poll_timeout_atomic(mac->fwdl_get_status, val,
101 val == RTW89_FWDL_WCPU_FW_INIT_RDY,
102 1, FWDL_WAIT_CNT, false, rtwdev, type);
103 if (ret) {
104 switch (val) {
105 case RTW89_FWDL_CHECKSUM_FAIL:
106 rtw89_err(rtwdev, "fw checksum fail\n");
107 return -EINVAL;
108
109 case RTW89_FWDL_SECURITY_FAIL:
110 rtw89_err(rtwdev, "fw security fail\n");
111 return -EINVAL;
112
113 case RTW89_FWDL_CV_NOT_MATCH:
114 rtw89_err(rtwdev, "fw cv not match\n");
115 return -EINVAL;
116
117 default:
118 rtw89_err(rtwdev, "fw unexpected status %d\n", val);
119 return -EBUSY;
120 }
121 }
122
123 set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
124
125 return 0;
126 }
127
rtw89_fw_hdr_parser_v0(struct rtw89_dev * rtwdev,const u8 * fw,u32 len,struct rtw89_fw_bin_info * info)128 static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
129 struct rtw89_fw_bin_info *info)
130 {
131 const struct rtw89_fw_hdr *fw_hdr = (const struct rtw89_fw_hdr *)fw;
132 const struct rtw89_chip_info *chip = rtwdev->chip;
133 struct rtw89_fw_hdr_section_info *section_info;
134 struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
135 const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
136 const struct rtw89_fw_hdr_section *section;
137 const u8 *fw_end = fw + len;
138 const u8 *bin;
139 u32 base_hdr_len;
140 u32 mssc_len;
141 int ret;
142 u32 i;
143
144 if (!info)
145 return -EINVAL;
146
147 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_W6_SEC_NUM);
148 base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
149 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_W7_DYN_HDR);
150 info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_W7_IDMEM_SHARE_MODE);
151
152 if (info->dynamic_hdr_en) {
153 info->hdr_len = le32_get_bits(fw_hdr->w3, FW_HDR_W3_LEN);
154 info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
155 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
156 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
157 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
158 return -EINVAL;
159 }
160 } else {
161 info->hdr_len = base_hdr_len;
162 info->dynamic_hdr_len = 0;
163 }
164
165 bin = fw + info->hdr_len;
166
167 /* jump to section header */
168 section_info = info->section_info;
169 for (i = 0; i < info->section_num; i++) {
170 section = &fw_hdr->sections[i];
171 section_info->type =
172 le32_get_bits(section->w1, FWSECTION_HDR_W1_SECTIONTYPE);
173 section_info->len = le32_get_bits(section->w1, FWSECTION_HDR_W1_SEC_SIZE);
174
175 if (le32_get_bits(section->w1, FWSECTION_HDR_W1_CHECKSUM))
176 section_info->len += FWDL_SECTION_CHKSUM_LEN;
177 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_W1_REDL);
178 section_info->dladdr =
179 le32_get_bits(section->w0, FWSECTION_HDR_W0_DL_ADDR) & 0x1fffffff;
180 section_info->addr = bin;
181
182 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
183 section_info->mssc =
184 le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC);
185
186 ret = __parse_security_section(rtwdev, info, section_info,
187 bin, &mssc_len);
188 if (ret)
189 return ret;
190
191 if (sec->secure_boot && chip->chip_id == RTL8852B)
192 section_info->len_override = 960;
193 } else {
194 section_info->mssc = 0;
195 mssc_len = 0;
196 }
197
198 rtw89_debug(rtwdev, RTW89_DBG_FW,
199 "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n",
200 i, section_info->type, section_info->len,
201 section_info->mssc, mssc_len, bin - fw);
202 rtw89_debug(rtwdev, RTW89_DBG_FW,
203 " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n",
204 section_info->ignore, section_info->key_addr,
205 section_info->key_addr ?
206 section_info->key_addr - section_info->addr : 0,
207 section_info->key_len, section_info->key_idx);
208
209 bin += section_info->len + mssc_len;
210 section_info++;
211 }
212
213 if (fw_end != bin) {
214 rtw89_err(rtwdev, "[ERR]fw bin size\n");
215 return -EINVAL;
216 }
217
218 return 0;
219 }
220
__get_mssc_key_idx(struct rtw89_dev * rtwdev,const struct rtw89_fw_mss_pool_hdr * mss_hdr,u32 rmp_tbl_size,u32 * key_idx)221 static int __get_mssc_key_idx(struct rtw89_dev *rtwdev,
222 const struct rtw89_fw_mss_pool_hdr *mss_hdr,
223 u32 rmp_tbl_size, u32 *key_idx)
224 {
225 struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
226 u32 sel_byte_idx;
227 u32 mss_sel_idx;
228 u8 sel_bit_idx;
229 int i;
230
231 if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF) {
232 if (!mss_hdr->defen)
233 return -ENOENT;
234
235 mss_sel_idx = sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
236 sec->mss_key_num;
237 } else {
238 if (mss_hdr->defen)
239 mss_sel_idx = FWDL_MSS_POOL_DEFKEYSETS_SIZE << 3;
240 else
241 mss_sel_idx = 0;
242 mss_sel_idx += sec->mss_dev_type * le16_to_cpu(mss_hdr->msskey_num_max) *
243 le16_to_cpu(mss_hdr->msscust_max) +
244 sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
245 sec->mss_key_num;
246 }
247
248 sel_byte_idx = mss_sel_idx >> 3;
249 sel_bit_idx = mss_sel_idx & 0x7;
250
251 if (sel_byte_idx >= rmp_tbl_size)
252 return -EFAULT;
253
254 if (!(mss_hdr->rmp_tbl[sel_byte_idx] & BIT(sel_bit_idx)))
255 return -ENOENT;
256
257 *key_idx = hweight8(mss_hdr->rmp_tbl[sel_byte_idx] & (BIT(sel_bit_idx) - 1));
258
259 for (i = 0; i < sel_byte_idx; i++)
260 *key_idx += hweight8(mss_hdr->rmp_tbl[i]);
261
262 return 0;
263 }
264
__parse_formatted_mssc(struct rtw89_dev * rtwdev,struct rtw89_fw_bin_info * info,struct rtw89_fw_hdr_section_info * section_info,const void * content,u32 * mssc_len)265 static int __parse_formatted_mssc(struct rtw89_dev *rtwdev,
266 struct rtw89_fw_bin_info *info,
267 struct rtw89_fw_hdr_section_info *section_info,
268 const void *content,
269 u32 *mssc_len)
270 {
271 const struct rtw89_fw_mss_pool_hdr *mss_hdr = content + section_info->len;
272 const union rtw89_fw_section_mssc_content *section_content = content;
273 struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
274 u32 rmp_tbl_size;
275 u32 key_sign_len;
276 u32 real_key_idx;
277 u32 sb_sel_ver;
278 int ret;
279
280 if (memcmp(mss_signature, mss_hdr->signature, sizeof(mss_signature)) != 0) {
281 rtw89_err(rtwdev, "[ERR] wrong MSS signature\n");
282 return -ENOENT;
283 }
284
285 if (mss_hdr->rmpfmt == MSS_POOL_RMP_TBL_BITMASK) {
286 rmp_tbl_size = (le16_to_cpu(mss_hdr->msskey_num_max) *
287 le16_to_cpu(mss_hdr->msscust_max) *
288 mss_hdr->mssdev_max) >> 3;
289 if (mss_hdr->defen)
290 rmp_tbl_size += FWDL_MSS_POOL_DEFKEYSETS_SIZE;
291 } else {
292 rtw89_err(rtwdev, "[ERR] MSS Key Pool Remap Table Format Unsupport:%X\n",
293 mss_hdr->rmpfmt);
294 return -EINVAL;
295 }
296
297 if (rmp_tbl_size + sizeof(*mss_hdr) != le32_to_cpu(mss_hdr->key_raw_offset)) {
298 rtw89_err(rtwdev, "[ERR] MSS Key Pool Format Error:0x%X + 0x%X != 0x%X\n",
299 rmp_tbl_size, (int)sizeof(*mss_hdr),
300 le32_to_cpu(mss_hdr->key_raw_offset));
301 return -EINVAL;
302 }
303
304 key_sign_len = le16_to_cpu(section_content->key_sign_len.v) >> 2;
305 if (!key_sign_len)
306 key_sign_len = 512;
307
308 if (info->dsp_checksum)
309 key_sign_len += FWDL_SECURITY_CHKSUM_LEN;
310
311 *mssc_len = sizeof(*mss_hdr) + rmp_tbl_size +
312 le16_to_cpu(mss_hdr->keypair_num) * key_sign_len;
313
314 if (!sec->secure_boot)
315 goto out;
316
317 sb_sel_ver = le32_to_cpu(section_content->sb_sel_ver.v);
318 if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn)
319 goto ignore;
320
321 ret = __get_mssc_key_idx(rtwdev, mss_hdr, rmp_tbl_size, &real_key_idx);
322 if (ret)
323 goto ignore;
324
325 section_info->key_addr = content + section_info->len +
326 le32_to_cpu(mss_hdr->key_raw_offset) +
327 key_sign_len * real_key_idx;
328 section_info->key_len = key_sign_len;
329 section_info->key_idx = real_key_idx;
330
331 out:
332 if (info->secure_section_exist) {
333 section_info->ignore = true;
334 return 0;
335 }
336
337 info->secure_section_exist = true;
338
339 return 0;
340
341 ignore:
342 section_info->ignore = true;
343
344 return 0;
345 }
346
__parse_security_section(struct rtw89_dev * rtwdev,struct rtw89_fw_bin_info * info,struct rtw89_fw_hdr_section_info * section_info,const void * content,u32 * mssc_len)347 static int __parse_security_section(struct rtw89_dev *rtwdev,
348 struct rtw89_fw_bin_info *info,
349 struct rtw89_fw_hdr_section_info *section_info,
350 const void *content,
351 u32 *mssc_len)
352 {
353 struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
354 int ret;
355
356 if ((section_info->mssc & FORMATTED_MSSC_MASK) == FORMATTED_MSSC) {
357 ret = __parse_formatted_mssc(rtwdev, info, section_info,
358 content, mssc_len);
359 if (ret)
360 return -EINVAL;
361 } else {
362 *mssc_len = section_info->mssc * FWDL_SECURITY_SIGLEN;
363 if (info->dsp_checksum)
364 *mssc_len += section_info->mssc * FWDL_SECURITY_CHKSUM_LEN;
365
366 if (sec->secure_boot) {
367 if (sec->mss_idx >= section_info->mssc)
368 return -EFAULT;
369 section_info->key_addr = content + section_info->len +
370 sec->mss_idx * FWDL_SECURITY_SIGLEN;
371 section_info->key_len = FWDL_SECURITY_SIGLEN;
372 }
373
374 info->secure_section_exist = true;
375 }
376
377 return 0;
378 }
379
rtw89_fw_hdr_parser_v1(struct rtw89_dev * rtwdev,const u8 * fw,u32 len,struct rtw89_fw_bin_info * info)380 static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
381 struct rtw89_fw_bin_info *info)
382 {
383 const struct rtw89_fw_hdr_v1 *fw_hdr = (const struct rtw89_fw_hdr_v1 *)fw;
384 struct rtw89_fw_hdr_section_info *section_info;
385 const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
386 const struct rtw89_fw_hdr_section_v1 *section;
387 const u8 *fw_end = fw + len;
388 const u8 *bin;
389 u32 base_hdr_len;
390 u32 mssc_len;
391 int ret;
392 u32 i;
393
394 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM);
395 info->dsp_checksum = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_DSP_CHKSUM);
396 base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
397 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR);
398 info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_IDMEM_SHARE_MODE);
399
400 if (info->dynamic_hdr_en) {
401 info->hdr_len = le32_get_bits(fw_hdr->w5, FW_HDR_V1_W5_HDR_SIZE);
402 info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
403 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
404 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
405 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
406 return -EINVAL;
407 }
408 } else {
409 info->hdr_len = base_hdr_len;
410 info->dynamic_hdr_len = 0;
411 }
412
413 bin = fw + info->hdr_len;
414
415 /* jump to section header */
416 section_info = info->section_info;
417 for (i = 0; i < info->section_num; i++) {
418 section = &fw_hdr->sections[i];
419
420 section_info->type =
421 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE);
422 section_info->len =
423 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE);
424 if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM))
425 section_info->len += FWDL_SECTION_CHKSUM_LEN;
426 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_REDL);
427 section_info->dladdr =
428 le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR);
429 section_info->addr = bin;
430
431 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
432 section_info->mssc =
433 le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC);
434
435 ret = __parse_security_section(rtwdev, info, section_info,
436 bin, &mssc_len);
437 if (ret)
438 return ret;
439 } else {
440 section_info->mssc = 0;
441 mssc_len = 0;
442 }
443
444 rtw89_debug(rtwdev, RTW89_DBG_FW,
445 "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n",
446 i, section_info->type, section_info->len,
447 section_info->mssc, mssc_len, bin - fw);
448 rtw89_debug(rtwdev, RTW89_DBG_FW,
449 " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n",
450 section_info->ignore, section_info->key_addr,
451 section_info->key_addr ?
452 section_info->key_addr - section_info->addr : 0,
453 section_info->key_len, section_info->key_idx);
454
455 bin += section_info->len + mssc_len;
456 section_info++;
457 }
458
459 if (fw_end != bin) {
460 rtw89_err(rtwdev, "[ERR]fw bin size\n");
461 return -EINVAL;
462 }
463
464 if (!info->secure_section_exist)
465 rtw89_warn(rtwdev, "no firmware secure section\n");
466
467 return 0;
468 }
469
rtw89_fw_hdr_parser(struct rtw89_dev * rtwdev,const struct rtw89_fw_suit * fw_suit,struct rtw89_fw_bin_info * info)470 static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev,
471 const struct rtw89_fw_suit *fw_suit,
472 struct rtw89_fw_bin_info *info)
473 {
474 const u8 *fw = fw_suit->data;
475 u32 len = fw_suit->size;
476
477 if (!fw || !len) {
478 rtw89_err(rtwdev, "fw type %d isn't recognized\n", fw_suit->type);
479 return -ENOENT;
480 }
481
482 switch (fw_suit->hdr_ver) {
483 case 0:
484 return rtw89_fw_hdr_parser_v0(rtwdev, fw, len, info);
485 case 1:
486 return rtw89_fw_hdr_parser_v1(rtwdev, fw, len, info);
487 default:
488 return -ENOENT;
489 }
490 }
491
492 static
rtw89_mfw_recognize(struct rtw89_dev * rtwdev,enum rtw89_fw_type type,struct rtw89_fw_suit * fw_suit,bool nowarn)493 int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
494 struct rtw89_fw_suit *fw_suit, bool nowarn)
495 {
496 struct rtw89_fw_info *fw_info = &rtwdev->fw;
497 const struct firmware *firmware = fw_info->req.firmware;
498 const u8 *mfw = firmware->data;
499 u32 mfw_len = firmware->size;
500 const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
501 const struct rtw89_mfw_info *mfw_info = NULL, *tmp;
502 int i;
503
504 if (mfw_hdr->sig != RTW89_MFW_SIG) {
505 rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
506 /* legacy firmware support normal type only */
507 if (type != RTW89_FW_NORMAL)
508 return -EINVAL;
509 fw_suit->data = mfw;
510 fw_suit->size = mfw_len;
511 return 0;
512 }
513
514 for (i = 0; i < mfw_hdr->fw_nr; i++) {
515 tmp = &mfw_hdr->info[i];
516 if (tmp->type != type)
517 continue;
518
519 if (type == RTW89_FW_LOGFMT) {
520 mfw_info = tmp;
521 goto found;
522 }
523
524 /* Version order of WiFi firmware in firmware file are not in order,
525 * pass all firmware to find the equal or less but closest version.
526 */
527 if (tmp->cv <= rtwdev->hal.cv && !tmp->mp) {
528 if (!mfw_info || mfw_info->cv < tmp->cv)
529 mfw_info = tmp;
530 }
531 }
532
533 if (mfw_info)
534 goto found;
535
536 if (!nowarn)
537 rtw89_err(rtwdev, "no suitable firmware found\n");
538 return -ENOENT;
539
540 found:
541 fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
542 fw_suit->size = le32_to_cpu(mfw_info->size);
543 return 0;
544 }
545
rtw89_mfw_get_size(struct rtw89_dev * rtwdev)546 static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev)
547 {
548 struct rtw89_fw_info *fw_info = &rtwdev->fw;
549 const struct firmware *firmware = fw_info->req.firmware;
550 const struct rtw89_mfw_hdr *mfw_hdr =
551 (const struct rtw89_mfw_hdr *)firmware->data;
552 const struct rtw89_mfw_info *mfw_info;
553 u32 size;
554
555 if (mfw_hdr->sig != RTW89_MFW_SIG) {
556 rtw89_warn(rtwdev, "not mfw format\n");
557 return 0;
558 }
559
560 mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1];
561 size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size);
562
563 return size;
564 }
565
rtw89_fw_update_ver_v0(struct rtw89_dev * rtwdev,struct rtw89_fw_suit * fw_suit,const struct rtw89_fw_hdr * hdr)566 static void rtw89_fw_update_ver_v0(struct rtw89_dev *rtwdev,
567 struct rtw89_fw_suit *fw_suit,
568 const struct rtw89_fw_hdr *hdr)
569 {
570 fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MAJOR_VERSION);
571 fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MINOR_VERSION);
572 fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_W1_SUBVERSION);
573 fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_W1_SUBINDEX);
574 fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_W2_COMMITID);
575 fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_W5_YEAR);
576 fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_W4_MONTH);
577 fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_W4_DATE);
578 fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_W4_HOUR);
579 fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_W4_MIN);
580 fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_W7_CMD_VERSERION);
581 }
582
rtw89_fw_update_ver_v1(struct rtw89_dev * rtwdev,struct rtw89_fw_suit * fw_suit,const struct rtw89_fw_hdr_v1 * hdr)583 static void rtw89_fw_update_ver_v1(struct rtw89_dev *rtwdev,
584 struct rtw89_fw_suit *fw_suit,
585 const struct rtw89_fw_hdr_v1 *hdr)
586 {
587 fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MAJOR_VERSION);
588 fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MINOR_VERSION);
589 fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBVERSION);
590 fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBINDEX);
591 fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_V1_W2_COMMITID);
592 fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_V1_W5_YEAR);
593 fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MONTH);
594 fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_V1_W4_DATE);
595 fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_V1_W4_HOUR);
596 fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MIN);
597 fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_V1_W3_CMD_VERSERION);
598 }
599
rtw89_fw_update_ver(struct rtw89_dev * rtwdev,enum rtw89_fw_type type,struct rtw89_fw_suit * fw_suit)600 static int rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
601 enum rtw89_fw_type type,
602 struct rtw89_fw_suit *fw_suit)
603 {
604 const struct rtw89_fw_hdr *v0 = (const struct rtw89_fw_hdr *)fw_suit->data;
605 const struct rtw89_fw_hdr_v1 *v1 = (const struct rtw89_fw_hdr_v1 *)fw_suit->data;
606
607 if (type == RTW89_FW_LOGFMT)
608 return 0;
609
610 fw_suit->type = type;
611 fw_suit->hdr_ver = le32_get_bits(v0->w3, FW_HDR_W3_HDR_VER);
612
613 switch (fw_suit->hdr_ver) {
614 case 0:
615 rtw89_fw_update_ver_v0(rtwdev, fw_suit, v0);
616 break;
617 case 1:
618 rtw89_fw_update_ver_v1(rtwdev, fw_suit, v1);
619 break;
620 default:
621 rtw89_err(rtwdev, "Unknown firmware header version %u\n",
622 fw_suit->hdr_ver);
623 return -ENOENT;
624 }
625
626 rtw89_info(rtwdev,
627 "Firmware version %u.%u.%u.%u (%08x), cmd version %u, type %u\n",
628 fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver,
629 fw_suit->sub_idex, fw_suit->commitid, fw_suit->cmd_ver, type);
630
631 return 0;
632 }
633
634 static
__rtw89_fw_recognize(struct rtw89_dev * rtwdev,enum rtw89_fw_type type,bool nowarn)635 int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
636 bool nowarn)
637 {
638 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
639 int ret;
640
641 ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn);
642 if (ret)
643 return ret;
644
645 return rtw89_fw_update_ver(rtwdev, type, fw_suit);
646 }
647
648 static
__rtw89_fw_recognize_from_elm(struct rtw89_dev * rtwdev,const struct rtw89_fw_element_hdr * elm,const union rtw89_fw_element_arg arg)649 int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev,
650 const struct rtw89_fw_element_hdr *elm,
651 const union rtw89_fw_element_arg arg)
652 {
653 enum rtw89_fw_type type = arg.fw_type;
654 struct rtw89_hal *hal = &rtwdev->hal;
655 struct rtw89_fw_suit *fw_suit;
656
657 /* Version of BB MCU is in decreasing order in firmware file, so take
658 * first equal or less version, which is equal or less but closest version.
659 */
660 if (hal->cv < elm->u.bbmcu.cv)
661 return 1; /* ignore this element */
662
663 fw_suit = rtw89_fw_suit_get(rtwdev, type);
664 if (fw_suit->data)
665 return 1; /* ignore this element (a firmware is taken already) */
666
667 fw_suit->data = elm->u.bbmcu.contents;
668 fw_suit->size = le32_to_cpu(elm->size);
669
670 return rtw89_fw_update_ver(rtwdev, type, fw_suit);
671 }
672
673 #define __DEF_FW_FEAT_COND(__cond, __op) \
674 static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \
675 { \
676 return suit_ver_code __op comp_ver_code; \
677 }
678
679 __DEF_FW_FEAT_COND(ge, >=); /* greater or equal */
680 __DEF_FW_FEAT_COND(le, <=); /* less or equal */
681 __DEF_FW_FEAT_COND(lt, <); /* less than */
682
683 struct __fw_feat_cfg {
684 enum rtw89_core_chip_id chip_id;
685 enum rtw89_fw_feature feature;
686 u32 ver_code;
687 bool (*cond)(u32 suit_ver_code, u32 comp_ver_code);
688 };
689
690 #define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \
691 { \
692 .chip_id = _chip, \
693 .feature = RTW89_FW_FEATURE_ ## _feat, \
694 .ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
695 .cond = __fw_feat_cond_ ## _cond, \
696 }
697
698 static const struct __fw_feat_cfg fw_feat_tbl[] = {
699 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE),
700 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD),
701 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER),
702 __CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT),
703 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
704 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
705 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER),
706 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 37, 0, NO_WOW_CPU_IO_RX),
707 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP),
708 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG),
709 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE),
710 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER),
711 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD),
712 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 7, BEACON_FILTER),
713 __CFG_FW_FEAT(RTL8852B, lt, 0, 29, 30, 0, NO_WOW_CPU_IO_RX),
714 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG),
715 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE),
716 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER),
717 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD),
718 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 110, 0, BEACON_FILTER),
719 __CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
720 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
721 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
722 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
723 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
724 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 80, 0, WOW_REASON_V1),
725 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER),
726 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP),
727 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD),
728 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 21, 0, SCAN_OFFLOAD_BE_V0),
729 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER),
730 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1),
731 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, RFK_PRE_NOTIFY_V0),
732 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, LPS_CH_INFO),
733 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 42, 0, RFK_RXDCK_V0),
734 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 46, 0, NOTIFY_AP_INFO),
735 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 47, 0, CH_INFO_BE_V0),
736 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 49, 0, RFK_PRE_NOTIFY_V1),
737 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 51, 0, NO_PHYCAP_P1),
738 };
739
rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info * fw,const struct rtw89_chip_info * chip,u32 ver_code)740 static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
741 const struct rtw89_chip_info *chip,
742 u32 ver_code)
743 {
744 int i;
745
746 for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
747 const struct __fw_feat_cfg *ent = &fw_feat_tbl[i];
748
749 if (chip->chip_id != ent->chip_id)
750 continue;
751
752 if (ent->cond(ver_code, ent->ver_code))
753 RTW89_SET_FW_FEATURE(ent->feature, fw);
754 }
755 }
756
rtw89_fw_recognize_features(struct rtw89_dev * rtwdev)757 static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
758 {
759 const struct rtw89_chip_info *chip = rtwdev->chip;
760 const struct rtw89_fw_suit *fw_suit;
761 u32 suit_ver_code;
762
763 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
764 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);
765
766 rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code);
767 }
768
769 const struct firmware *
rtw89_early_fw_feature_recognize(struct device * device,const struct rtw89_chip_info * chip,struct rtw89_fw_info * early_fw,int * used_fw_format)770 rtw89_early_fw_feature_recognize(struct device *device,
771 const struct rtw89_chip_info *chip,
772 struct rtw89_fw_info *early_fw,
773 int *used_fw_format)
774 {
775 const struct firmware *firmware;
776 char fw_name[64];
777 int fw_format;
778 u32 ver_code;
779 int ret;
780
781 for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) {
782 rtw89_fw_get_filename(fw_name, sizeof(fw_name),
783 chip->fw_basename, fw_format);
784
785 ret = request_firmware(&firmware, fw_name, device);
786 if (!ret) {
787 dev_info(device, "loaded firmware %s\n", fw_name);
788 *used_fw_format = fw_format;
789 break;
790 }
791 }
792
793 if (ret) {
794 dev_err(device, "failed to early request firmware: %d\n", ret);
795 return NULL;
796 }
797
798 ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data);
799
800 if (!ver_code)
801 goto out;
802
803 rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code);
804
805 out:
806 return firmware;
807 }
808
rtw89_fw_validate_ver_required(struct rtw89_dev * rtwdev)809 static int rtw89_fw_validate_ver_required(struct rtw89_dev *rtwdev)
810 {
811 const struct rtw89_chip_variant *variant = rtwdev->variant;
812 const struct rtw89_fw_suit *fw_suit;
813 u32 suit_ver_code;
814
815 if (!variant)
816 return 0;
817
818 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
819 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);
820
821 if (variant->fw_min_ver_code > suit_ver_code) {
822 rtw89_err(rtwdev, "minimum required firmware version is 0x%x\n",
823 variant->fw_min_ver_code);
824 return -ENOENT;
825 }
826
827 return 0;
828 }
829
rtw89_fw_recognize(struct rtw89_dev * rtwdev)830 int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
831 {
832 const struct rtw89_chip_info *chip = rtwdev->chip;
833 int ret;
834
835 if (chip->try_ce_fw) {
836 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true);
837 if (!ret)
838 goto normal_done;
839 }
840
841 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false);
842 if (ret)
843 return ret;
844
845 normal_done:
846 ret = rtw89_fw_validate_ver_required(rtwdev);
847 if (ret)
848 return ret;
849
850 /* It still works if wowlan firmware isn't existing. */
851 __rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false);
852
853 /* It still works if log format file isn't existing. */
854 __rtw89_fw_recognize(rtwdev, RTW89_FW_LOGFMT, true);
855
856 rtw89_fw_recognize_features(rtwdev);
857
858 rtw89_coex_recognize_ver(rtwdev);
859
860 return 0;
861 }
862
863 static
rtw89_build_phy_tbl_from_elm(struct rtw89_dev * rtwdev,const struct rtw89_fw_element_hdr * elm,const union rtw89_fw_element_arg arg)864 int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev,
865 const struct rtw89_fw_element_hdr *elm,
866 const union rtw89_fw_element_arg arg)
867 {
868 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
869 struct rtw89_phy_table *tbl;
870 struct rtw89_reg2_def *regs;
871 enum rtw89_rf_path rf_path;
872 u32 n_regs, i;
873 u8 idx;
874
875 tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
876 if (!tbl)
877 return -ENOMEM;
878
879 switch (le32_to_cpu(elm->id)) {
880 case RTW89_FW_ELEMENT_ID_BB_REG:
881 elm_info->bb_tbl = tbl;
882 break;
883 case RTW89_FW_ELEMENT_ID_BB_GAIN:
884 elm_info->bb_gain = tbl;
885 break;
886 case RTW89_FW_ELEMENT_ID_RADIO_A:
887 case RTW89_FW_ELEMENT_ID_RADIO_B:
888 case RTW89_FW_ELEMENT_ID_RADIO_C:
889 case RTW89_FW_ELEMENT_ID_RADIO_D:
890 rf_path = arg.rf_path;
891 idx = elm->u.reg2.idx;
892
893 elm_info->rf_radio[idx] = tbl;
894 tbl->rf_path = rf_path;
895 tbl->config = rtw89_phy_config_rf_reg_v1;
896 break;
897 case RTW89_FW_ELEMENT_ID_RF_NCTL:
898 elm_info->rf_nctl = tbl;
899 break;
900 default:
901 kfree(tbl);
902 return -ENOENT;
903 }
904
905 n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]);
906 regs = kcalloc(n_regs, sizeof(tbl->regs[0]), GFP_KERNEL);
907 if (!regs)
908 goto out;
909
910 for (i = 0; i < n_regs; i++) {
911 regs[i].addr = le32_to_cpu(elm->u.reg2.regs[i].addr);
912 regs[i].data = le32_to_cpu(elm->u.reg2.regs[i].data);
913 }
914
915 tbl->n_regs = n_regs;
916 tbl->regs = regs;
917
918 return 0;
919
920 out:
921 kfree(tbl);
922 return -ENOMEM;
923 }
924
925 static
rtw89_fw_recognize_txpwr_from_elm(struct rtw89_dev * rtwdev,const struct rtw89_fw_element_hdr * elm,const union rtw89_fw_element_arg arg)926 int rtw89_fw_recognize_txpwr_from_elm(struct rtw89_dev *rtwdev,
927 const struct rtw89_fw_element_hdr *elm,
928 const union rtw89_fw_element_arg arg)
929 {
930 const struct __rtw89_fw_txpwr_element *txpwr_elm = &elm->u.txpwr;
931 const unsigned long offset = arg.offset;
932 struct rtw89_efuse *efuse = &rtwdev->efuse;
933 struct rtw89_txpwr_conf *conf;
934
935 if (!rtwdev->rfe_data) {
936 rtwdev->rfe_data = kzalloc(sizeof(*rtwdev->rfe_data), GFP_KERNEL);
937 if (!rtwdev->rfe_data)
938 return -ENOMEM;
939 }
940
941 conf = (void *)rtwdev->rfe_data + offset;
942
943 /* if multiple matched, take the last eventually */
944 if (txpwr_elm->rfe_type == efuse->rfe_type)
945 goto setup;
946
947 /* without one is matched, accept default */
948 if (txpwr_elm->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE &&
949 (!rtw89_txpwr_conf_valid(conf) ||
950 conf->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE))
951 goto setup;
952
953 rtw89_debug(rtwdev, RTW89_DBG_FW, "skip txpwr element ID %u RFE %u\n",
954 elm->id, txpwr_elm->rfe_type);
955 return 0;
956
957 setup:
958 rtw89_debug(rtwdev, RTW89_DBG_FW, "take txpwr element ID %u RFE %u\n",
959 elm->id, txpwr_elm->rfe_type);
960
961 conf->rfe_type = txpwr_elm->rfe_type;
962 conf->ent_sz = txpwr_elm->ent_sz;
963 conf->num_ents = le32_to_cpu(txpwr_elm->num_ents);
964 conf->data = txpwr_elm->content;
965 return 0;
966 }
967
968 static
rtw89_build_txpwr_trk_tbl_from_elm(struct rtw89_dev * rtwdev,const struct rtw89_fw_element_hdr * elm,const union rtw89_fw_element_arg arg)969 int rtw89_build_txpwr_trk_tbl_from_elm(struct rtw89_dev *rtwdev,
970 const struct rtw89_fw_element_hdr *elm,
971 const union rtw89_fw_element_arg arg)
972 {
973 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
974 const struct rtw89_chip_info *chip = rtwdev->chip;
975 u32 needed_bitmap = 0;
976 u32 offset = 0;
977 int subband;
978 u32 bitmap;
979 int type;
980
981 if (chip->support_bands & BIT(NL80211_BAND_6GHZ))
982 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_6GHZ;
983 if (chip->support_bands & BIT(NL80211_BAND_5GHZ))
984 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_5GHZ;
985 if (chip->support_bands & BIT(NL80211_BAND_2GHZ))
986 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_2GHZ;
987
988 bitmap = le32_to_cpu(elm->u.txpwr_trk.bitmap);
989
990 if ((bitmap & needed_bitmap) != needed_bitmap) {
991 rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %08x\n",
992 needed_bitmap, bitmap);
993 return -ENOENT;
994 }
995
996 elm_info->txpwr_trk = kzalloc(sizeof(*elm_info->txpwr_trk), GFP_KERNEL);
997 if (!elm_info->txpwr_trk)
998 return -ENOMEM;
999
1000 for (type = 0; bitmap; type++, bitmap >>= 1) {
1001 if (!(bitmap & BIT(0)))
1002 continue;
1003
1004 if (type >= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_START &&
1005 type <= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_MAX)
1006 subband = 4;
1007 else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_START &&
1008 type <= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_MAX)
1009 subband = 3;
1010 else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_START &&
1011 type <= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_MAX)
1012 subband = 1;
1013 else
1014 break;
1015
1016 elm_info->txpwr_trk->delta[type] = &elm->u.txpwr_trk.contents[offset];
1017
1018 offset += subband;
1019 if (offset * DELTA_SWINGIDX_SIZE > le32_to_cpu(elm->size))
1020 goto err;
1021 }
1022
1023 return 0;
1024
1025 err:
1026 rtw89_warn(rtwdev, "unexpected txpwr trk offset %d over size %d\n",
1027 offset, le32_to_cpu(elm->size));
1028 kfree(elm_info->txpwr_trk);
1029 elm_info->txpwr_trk = NULL;
1030
1031 return -EFAULT;
1032 }
1033
1034 static
rtw89_build_rfk_log_fmt_from_elm(struct rtw89_dev * rtwdev,const struct rtw89_fw_element_hdr * elm,const union rtw89_fw_element_arg arg)1035 int rtw89_build_rfk_log_fmt_from_elm(struct rtw89_dev *rtwdev,
1036 const struct rtw89_fw_element_hdr *elm,
1037 const union rtw89_fw_element_arg arg)
1038 {
1039 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1040 u8 rfk_id;
1041
1042 if (elm_info->rfk_log_fmt)
1043 goto allocated;
1044
1045 elm_info->rfk_log_fmt = kzalloc(sizeof(*elm_info->rfk_log_fmt), GFP_KERNEL);
1046 if (!elm_info->rfk_log_fmt)
1047 return 1; /* this is an optional element, so just ignore this */
1048
1049 allocated:
1050 rfk_id = elm->u.rfk_log_fmt.rfk_id;
1051 if (rfk_id >= RTW89_PHY_C2H_RFK_LOG_FUNC_NUM)
1052 return 1;
1053
1054 elm_info->rfk_log_fmt->elm[rfk_id] = elm;
1055
1056 return 0;
1057 }
1058
1059 static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
1060 [RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
1061 { .fw_type = RTW89_FW_BBMCU0 }, NULL},
1062 [RTW89_FW_ELEMENT_ID_BBMCU1] = {__rtw89_fw_recognize_from_elm,
1063 { .fw_type = RTW89_FW_BBMCU1 }, NULL},
1064 [RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, {}, "BB"},
1065 [RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, {}, NULL},
1066 [RTW89_FW_ELEMENT_ID_RADIO_A] = {rtw89_build_phy_tbl_from_elm,
1067 { .rf_path = RF_PATH_A }, "radio A"},
1068 [RTW89_FW_ELEMENT_ID_RADIO_B] = {rtw89_build_phy_tbl_from_elm,
1069 { .rf_path = RF_PATH_B }, NULL},
1070 [RTW89_FW_ELEMENT_ID_RADIO_C] = {rtw89_build_phy_tbl_from_elm,
1071 { .rf_path = RF_PATH_C }, NULL},
1072 [RTW89_FW_ELEMENT_ID_RADIO_D] = {rtw89_build_phy_tbl_from_elm,
1073 { .rf_path = RF_PATH_D }, NULL},
1074 [RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, {}, "NCTL"},
1075 [RTW89_FW_ELEMENT_ID_TXPWR_BYRATE] = {
1076 rtw89_fw_recognize_txpwr_from_elm,
1077 { .offset = offsetof(struct rtw89_rfe_data, byrate.conf) }, "TXPWR",
1078 },
1079 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ] = {
1080 rtw89_fw_recognize_txpwr_from_elm,
1081 { .offset = offsetof(struct rtw89_rfe_data, lmt_2ghz.conf) }, NULL,
1082 },
1083 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ] = {
1084 rtw89_fw_recognize_txpwr_from_elm,
1085 { .offset = offsetof(struct rtw89_rfe_data, lmt_5ghz.conf) }, NULL,
1086 },
1087 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ] = {
1088 rtw89_fw_recognize_txpwr_from_elm,
1089 { .offset = offsetof(struct rtw89_rfe_data, lmt_6ghz.conf) }, NULL,
1090 },
1091 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ] = {
1092 rtw89_fw_recognize_txpwr_from_elm,
1093 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_2ghz.conf) }, NULL,
1094 },
1095 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ] = {
1096 rtw89_fw_recognize_txpwr_from_elm,
1097 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_5ghz.conf) }, NULL,
1098 },
1099 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ] = {
1100 rtw89_fw_recognize_txpwr_from_elm,
1101 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_6ghz.conf) }, NULL,
1102 },
1103 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT] = {
1104 rtw89_fw_recognize_txpwr_from_elm,
1105 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt.conf) }, NULL,
1106 },
1107 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU] = {
1108 rtw89_fw_recognize_txpwr_from_elm,
1109 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt_ru.conf) }, NULL,
1110 },
1111 [RTW89_FW_ELEMENT_ID_TXPWR_TRK] = {
1112 rtw89_build_txpwr_trk_tbl_from_elm, {}, "PWR_TRK",
1113 },
1114 [RTW89_FW_ELEMENT_ID_RFKLOG_FMT] = {
1115 rtw89_build_rfk_log_fmt_from_elm, {}, NULL,
1116 },
1117 };
1118
rtw89_fw_recognize_elements(struct rtw89_dev * rtwdev)1119 int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev)
1120 {
1121 struct rtw89_fw_info *fw_info = &rtwdev->fw;
1122 const struct firmware *firmware = fw_info->req.firmware;
1123 const struct rtw89_chip_info *chip = rtwdev->chip;
1124 u32 unrecognized_elements = chip->needed_fw_elms;
1125 const struct rtw89_fw_element_handler *handler;
1126 const struct rtw89_fw_element_hdr *hdr;
1127 u32 elm_size;
1128 u32 elem_id;
1129 u32 offset;
1130 int ret;
1131
1132 BUILD_BUG_ON(sizeof(chip->needed_fw_elms) * 8 < RTW89_FW_ELEMENT_ID_NUM);
1133
1134 offset = rtw89_mfw_get_size(rtwdev);
1135 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
1136 if (offset == 0)
1137 return -EINVAL;
1138
1139 while (offset + sizeof(*hdr) < firmware->size) {
1140 hdr = (const struct rtw89_fw_element_hdr *)(firmware->data + offset);
1141
1142 elm_size = le32_to_cpu(hdr->size);
1143 if (offset + elm_size >= firmware->size) {
1144 rtw89_warn(rtwdev, "firmware element size exceeds\n");
1145 break;
1146 }
1147
1148 elem_id = le32_to_cpu(hdr->id);
1149 if (elem_id >= ARRAY_SIZE(__fw_element_handlers))
1150 goto next;
1151
1152 handler = &__fw_element_handlers[elem_id];
1153 if (!handler->fn)
1154 goto next;
1155
1156 ret = handler->fn(rtwdev, hdr, handler->arg);
1157 if (ret == 1) /* ignore this element */
1158 goto next;
1159 if (ret)
1160 return ret;
1161
1162 if (handler->name)
1163 rtw89_info(rtwdev, "Firmware element %s version: %4ph\n",
1164 handler->name, hdr->ver);
1165
1166 unrecognized_elements &= ~BIT(elem_id);
1167 next:
1168 offset += sizeof(*hdr) + elm_size;
1169 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
1170 }
1171
1172 if (unrecognized_elements) {
1173 rtw89_err(rtwdev, "Firmware elements 0x%08x are unrecognized\n",
1174 unrecognized_elements);
1175 return -ENOENT;
1176 }
1177
1178 return 0;
1179 }
1180
rtw89_h2c_pkt_set_hdr(struct rtw89_dev * rtwdev,struct sk_buff * skb,u8 type,u8 cat,u8 class,u8 func,bool rack,bool dack,u32 len)1181 void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
1182 u8 type, u8 cat, u8 class, u8 func,
1183 bool rack, bool dack, u32 len)
1184 {
1185 struct fwcmd_hdr *hdr;
1186
1187 hdr = (struct fwcmd_hdr *)skb_push(skb, 8);
1188
1189 if (!(rtwdev->fw.h2c_seq % 4))
1190 rack = true;
1191 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
1192 FIELD_PREP(H2C_HDR_CAT, cat) |
1193 FIELD_PREP(H2C_HDR_CLASS, class) |
1194 FIELD_PREP(H2C_HDR_FUNC, func) |
1195 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));
1196
1197 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
1198 len + H2C_HEADER_LEN) |
1199 (rack ? H2C_HDR_REC_ACK : 0) |
1200 (dack ? H2C_HDR_DONE_ACK : 0));
1201
1202 rtwdev->fw.h2c_seq++;
1203 }
1204
rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev * rtwdev,struct sk_buff * skb,u8 type,u8 cat,u8 class,u8 func,u32 len)1205 static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
1206 struct sk_buff *skb,
1207 u8 type, u8 cat, u8 class, u8 func,
1208 u32 len)
1209 {
1210 struct fwcmd_hdr *hdr;
1211
1212 hdr = (struct fwcmd_hdr *)skb_push(skb, 8);
1213
1214 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
1215 FIELD_PREP(H2C_HDR_CAT, cat) |
1216 FIELD_PREP(H2C_HDR_CLASS, class) |
1217 FIELD_PREP(H2C_HDR_FUNC, func) |
1218 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));
1219
1220 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
1221 len + H2C_HEADER_LEN));
1222 }
1223
__rtw89_fw_download_tweak_hdr_v0(struct rtw89_dev * rtwdev,struct rtw89_fw_bin_info * info,struct rtw89_fw_hdr * fw_hdr)1224 static u32 __rtw89_fw_download_tweak_hdr_v0(struct rtw89_dev *rtwdev,
1225 struct rtw89_fw_bin_info *info,
1226 struct rtw89_fw_hdr *fw_hdr)
1227 {
1228 struct rtw89_fw_hdr_section_info *section_info;
1229 struct rtw89_fw_hdr_section *section;
1230 int i;
1231
1232 le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN,
1233 FW_HDR_W7_PART_SIZE);
1234
1235 for (i = 0; i < info->section_num; i++) {
1236 section_info = &info->section_info[i];
1237
1238 if (!section_info->len_override)
1239 continue;
1240
1241 section = &fw_hdr->sections[i];
1242 le32p_replace_bits(§ion->w1, section_info->len_override,
1243 FWSECTION_HDR_W1_SEC_SIZE);
1244 }
1245
1246 return 0;
1247 }
1248
__rtw89_fw_download_tweak_hdr_v1(struct rtw89_dev * rtwdev,struct rtw89_fw_bin_info * info,struct rtw89_fw_hdr_v1 * fw_hdr)1249 static u32 __rtw89_fw_download_tweak_hdr_v1(struct rtw89_dev *rtwdev,
1250 struct rtw89_fw_bin_info *info,
1251 struct rtw89_fw_hdr_v1 *fw_hdr)
1252 {
1253 struct rtw89_fw_hdr_section_info *section_info;
1254 struct rtw89_fw_hdr_section_v1 *section;
1255 u8 dst_sec_idx = 0;
1256 u8 sec_idx;
1257
1258 le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN,
1259 FW_HDR_V1_W7_PART_SIZE);
1260
1261 for (sec_idx = 0; sec_idx < info->section_num; sec_idx++) {
1262 section_info = &info->section_info[sec_idx];
1263 section = &fw_hdr->sections[sec_idx];
1264
1265 if (section_info->ignore)
1266 continue;
1267
1268 if (dst_sec_idx != sec_idx)
1269 fw_hdr->sections[dst_sec_idx] = *section;
1270
1271 dst_sec_idx++;
1272 }
1273
1274 le32p_replace_bits(&fw_hdr->w6, dst_sec_idx, FW_HDR_V1_W6_SEC_NUM);
1275
1276 return (info->section_num - dst_sec_idx) * sizeof(*section);
1277 }
1278
__rtw89_fw_download_hdr(struct rtw89_dev * rtwdev,const struct rtw89_fw_suit * fw_suit,struct rtw89_fw_bin_info * info)1279 static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
1280 const struct rtw89_fw_suit *fw_suit,
1281 struct rtw89_fw_bin_info *info)
1282 {
1283 u32 len = info->hdr_len - info->dynamic_hdr_len;
1284 struct rtw89_fw_hdr_v1 *fw_hdr_v1;
1285 const u8 *fw = fw_suit->data;
1286 struct rtw89_fw_hdr *fw_hdr;
1287 struct sk_buff *skb;
1288 u32 truncated;
1289 u32 ret = 0;
1290
1291 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
1292 if (!skb) {
1293 rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
1294 return -ENOMEM;
1295 }
1296
1297 skb_put_data(skb, fw, len);
1298
1299 switch (fw_suit->hdr_ver) {
1300 case 0:
1301 fw_hdr = (struct rtw89_fw_hdr *)skb->data;
1302 truncated = __rtw89_fw_download_tweak_hdr_v0(rtwdev, info, fw_hdr);
1303 break;
1304 case 1:
1305 fw_hdr_v1 = (struct rtw89_fw_hdr_v1 *)skb->data;
1306 truncated = __rtw89_fw_download_tweak_hdr_v1(rtwdev, info, fw_hdr_v1);
1307 break;
1308 default:
1309 ret = -EOPNOTSUPP;
1310 goto fail;
1311 }
1312
1313 if (truncated) {
1314 len -= truncated;
1315 skb_trim(skb, len);
1316 }
1317
1318 rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
1319 H2C_CAT_MAC, H2C_CL_MAC_FWDL,
1320 H2C_FUNC_MAC_FWHDR_DL, len);
1321
1322 ret = rtw89_h2c_tx(rtwdev, skb, false);
1323 if (ret) {
1324 rtw89_err(rtwdev, "failed to send h2c\n");
1325 ret = -1;
1326 goto fail;
1327 }
1328
1329 return 0;
1330 fail:
1331 dev_kfree_skb_any(skb);
1332
1333 return ret;
1334 }
1335
rtw89_fw_download_hdr(struct rtw89_dev * rtwdev,const struct rtw89_fw_suit * fw_suit,struct rtw89_fw_bin_info * info)1336 static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
1337 const struct rtw89_fw_suit *fw_suit,
1338 struct rtw89_fw_bin_info *info)
1339 {
1340 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
1341 int ret;
1342
1343 ret = __rtw89_fw_download_hdr(rtwdev, fw_suit, info);
1344 if (ret) {
1345 rtw89_err(rtwdev, "[ERR]FW header download\n");
1346 return ret;
1347 }
1348
1349 ret = mac->fwdl_check_path_ready(rtwdev, false);
1350 if (ret) {
1351 rtw89_err(rtwdev, "[ERR]FWDL path ready\n");
1352 return ret;
1353 }
1354
1355 rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
1356 rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);
1357
1358 return 0;
1359 }
1360
__rtw89_fw_download_main(struct rtw89_dev * rtwdev,struct rtw89_fw_hdr_section_info * info)1361 static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
1362 struct rtw89_fw_hdr_section_info *info)
1363 {
1364 struct sk_buff *skb;
1365 const u8 *section = info->addr;
1366 u32 residue_len = info->len;
1367 bool copy_key = false;
1368 u32 pkt_len;
1369 int ret;
1370
1371 if (info->ignore)
1372 return 0;
1373
1374 if (info->len_override) {
1375 if (info->len_override > info->len)
1376 rtw89_warn(rtwdev, "override length %u larger than original %u\n",
1377 info->len_override, info->len);
1378 else
1379 residue_len = info->len_override;
1380 }
1381
1382 if (info->key_addr && info->key_len) {
1383 if (residue_len > FWDL_SECTION_PER_PKT_LEN || info->len < info->key_len)
1384 rtw89_warn(rtwdev,
1385 "ignore to copy key data because of len %d, %d, %d, %d\n",
1386 info->len, FWDL_SECTION_PER_PKT_LEN,
1387 info->key_len, residue_len);
1388 else
1389 copy_key = true;
1390 }
1391
1392 while (residue_len) {
1393 if (residue_len >= FWDL_SECTION_PER_PKT_LEN)
1394 pkt_len = FWDL_SECTION_PER_PKT_LEN;
1395 else
1396 pkt_len = residue_len;
1397
1398 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len);
1399 if (!skb) {
1400 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
1401 return -ENOMEM;
1402 }
1403 skb_put_data(skb, section, pkt_len);
1404
1405 if (copy_key)
1406 memcpy(skb->data + pkt_len - info->key_len,
1407 info->key_addr, info->key_len);
1408
1409 ret = rtw89_h2c_tx(rtwdev, skb, true);
1410 if (ret) {
1411 rtw89_err(rtwdev, "failed to send h2c\n");
1412 ret = -1;
1413 goto fail;
1414 }
1415
1416 section += pkt_len;
1417 residue_len -= pkt_len;
1418 }
1419
1420 return 0;
1421 fail:
1422 dev_kfree_skb_any(skb);
1423
1424 return ret;
1425 }
1426
1427 static enum rtw89_fwdl_check_type
rtw89_fw_get_fwdl_chk_type_from_suit(struct rtw89_dev * rtwdev,const struct rtw89_fw_suit * fw_suit)1428 rtw89_fw_get_fwdl_chk_type_from_suit(struct rtw89_dev *rtwdev,
1429 const struct rtw89_fw_suit *fw_suit)
1430 {
1431 switch (fw_suit->type) {
1432 case RTW89_FW_BBMCU0:
1433 return RTW89_FWDL_CHECK_BB0_FWDL_DONE;
1434 case RTW89_FW_BBMCU1:
1435 return RTW89_FWDL_CHECK_BB1_FWDL_DONE;
1436 default:
1437 return RTW89_FWDL_CHECK_WCPU_FWDL_DONE;
1438 }
1439 }
1440
rtw89_fw_download_main(struct rtw89_dev * rtwdev,const struct rtw89_fw_suit * fw_suit,struct rtw89_fw_bin_info * info)1441 static int rtw89_fw_download_main(struct rtw89_dev *rtwdev,
1442 const struct rtw89_fw_suit *fw_suit,
1443 struct rtw89_fw_bin_info *info)
1444 {
1445 struct rtw89_fw_hdr_section_info *section_info = info->section_info;
1446 const struct rtw89_chip_info *chip = rtwdev->chip;
1447 enum rtw89_fwdl_check_type chk_type;
1448 u8 section_num = info->section_num;
1449 int ret;
1450
1451 while (section_num--) {
1452 ret = __rtw89_fw_download_main(rtwdev, section_info);
1453 if (ret)
1454 return ret;
1455 section_info++;
1456 }
1457
1458 if (chip->chip_gen == RTW89_CHIP_AX)
1459 return 0;
1460
1461 chk_type = rtw89_fw_get_fwdl_chk_type_from_suit(rtwdev, fw_suit);
1462 ret = rtw89_fw_check_rdy(rtwdev, chk_type);
1463 if (ret) {
1464 rtw89_warn(rtwdev, "failed to download firmware type %u\n",
1465 fw_suit->type);
1466 return ret;
1467 }
1468
1469 return 0;
1470 }
1471
rtw89_fw_prog_cnt_dump(struct rtw89_dev * rtwdev)1472 static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev)
1473 {
1474 enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
1475 u32 addr = R_AX_DBG_PORT_SEL;
1476 u32 val32;
1477 u16 index;
1478
1479 if (chip_gen == RTW89_CHIP_BE) {
1480 addr = R_BE_WLCPU_PORT_PC;
1481 goto dump;
1482 }
1483
1484 rtw89_write32(rtwdev, R_AX_DBG_CTRL,
1485 FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) |
1486 FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL));
1487 rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL);
1488
1489 dump:
1490 for (index = 0; index < 15; index++) {
1491 val32 = rtw89_read32(rtwdev, addr);
1492 rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32);
1493 fsleep(10);
1494 }
1495 }
1496
rtw89_fw_dl_fail_dump(struct rtw89_dev * rtwdev)1497 static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev)
1498 {
1499 u32 val32;
1500
1501 val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
1502 rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32);
1503
1504 val32 = rtw89_read32(rtwdev, R_AX_BOOT_DBG);
1505 rtw89_err(rtwdev, "[ERR]fwdl 0x83F0 = 0x%x\n", val32);
1506
1507 rtw89_fw_prog_cnt_dump(rtwdev);
1508 }
1509
rtw89_fw_download_suit(struct rtw89_dev * rtwdev,struct rtw89_fw_suit * fw_suit)1510 static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev,
1511 struct rtw89_fw_suit *fw_suit)
1512 {
1513 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
1514 struct rtw89_fw_bin_info info = {};
1515 int ret;
1516
1517 ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info);
1518 if (ret) {
1519 rtw89_err(rtwdev, "parse fw header fail\n");
1520 return ret;
1521 }
1522
1523 rtw89_fwdl_secure_idmem_share_mode(rtwdev, info.idmem_share_mode);
1524
1525 if (rtwdev->chip->chip_id == RTL8922A &&
1526 (fw_suit->type == RTW89_FW_NORMAL || fw_suit->type == RTW89_FW_WOWLAN))
1527 rtw89_write32(rtwdev, R_BE_SECURE_BOOT_MALLOC_INFO, 0x20248000);
1528
1529 ret = mac->fwdl_check_path_ready(rtwdev, true);
1530 if (ret) {
1531 rtw89_err(rtwdev, "[ERR]H2C path ready\n");
1532 return ret;
1533 }
1534
1535 ret = rtw89_fw_download_hdr(rtwdev, fw_suit, &info);
1536 if (ret)
1537 return ret;
1538
1539 ret = rtw89_fw_download_main(rtwdev, fw_suit, &info);
1540 if (ret)
1541 return ret;
1542
1543 return 0;
1544 }
1545
1546 static
__rtw89_fw_download(struct rtw89_dev * rtwdev,enum rtw89_fw_type type,bool include_bb)1547 int __rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
1548 bool include_bb)
1549 {
1550 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
1551 struct rtw89_fw_info *fw_info = &rtwdev->fw;
1552 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
1553 u8 bbmcu_nr = rtwdev->chip->bbmcu_nr;
1554 int ret;
1555 int i;
1556
1557 mac->disable_cpu(rtwdev);
1558 ret = mac->fwdl_enable_wcpu(rtwdev, 0, true, include_bb);
1559 if (ret)
1560 return ret;
1561
1562 ret = rtw89_fw_download_suit(rtwdev, fw_suit);
1563 if (ret)
1564 goto fwdl_err;
1565
1566 for (i = 0; i < bbmcu_nr && include_bb; i++) {
1567 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_BBMCU0 + i);
1568
1569 ret = rtw89_fw_download_suit(rtwdev, fw_suit);
1570 if (ret)
1571 goto fwdl_err;
1572 }
1573
1574 fw_info->h2c_seq = 0;
1575 fw_info->rec_seq = 0;
1576 fw_info->h2c_counter = 0;
1577 fw_info->c2h_counter = 0;
1578 rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX;
1579 rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX;
1580
1581 mdelay(5);
1582
1583 ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE);
1584 if (ret) {
1585 rtw89_warn(rtwdev, "download firmware fail\n");
1586 goto fwdl_err;
1587 }
1588
1589 return ret;
1590
1591 fwdl_err:
1592 rtw89_fw_dl_fail_dump(rtwdev);
1593 return ret;
1594 }
1595
rtw89_fw_download(struct rtw89_dev * rtwdev,enum rtw89_fw_type type,bool include_bb)1596 int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
1597 bool include_bb)
1598 {
1599 int retry;
1600 int ret;
1601
1602 for (retry = 0; retry < 5; retry++) {
1603 ret = __rtw89_fw_download(rtwdev, type, include_bb);
1604 if (!ret)
1605 return 0;
1606 }
1607
1608 return ret;
1609 }
1610
rtw89_wait_firmware_completion(struct rtw89_dev * rtwdev)1611 int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev)
1612 {
1613 struct rtw89_fw_info *fw = &rtwdev->fw;
1614
1615 wait_for_completion(&fw->req.completion);
1616 if (!fw->req.firmware)
1617 return -EINVAL;
1618
1619 return 0;
1620 }
1621
rtw89_load_firmware_req(struct rtw89_dev * rtwdev,struct rtw89_fw_req_info * req,const char * fw_name,bool nowarn)1622 static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev,
1623 struct rtw89_fw_req_info *req,
1624 const char *fw_name, bool nowarn)
1625 {
1626 int ret;
1627
1628 if (req->firmware) {
1629 rtw89_debug(rtwdev, RTW89_DBG_FW,
1630 "full firmware has been early requested\n");
1631 complete_all(&req->completion);
1632 return 0;
1633 }
1634
1635 if (nowarn)
1636 ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev);
1637 else
1638 ret = request_firmware(&req->firmware, fw_name, rtwdev->dev);
1639
1640 complete_all(&req->completion);
1641
1642 return ret;
1643 }
1644
rtw89_load_firmware_work(struct work_struct * work)1645 void rtw89_load_firmware_work(struct work_struct *work)
1646 {
1647 struct rtw89_dev *rtwdev =
1648 container_of(work, struct rtw89_dev, load_firmware_work);
1649 const struct rtw89_chip_info *chip = rtwdev->chip;
1650 char fw_name[64];
1651
1652 rtw89_fw_get_filename(fw_name, sizeof(fw_name),
1653 chip->fw_basename, rtwdev->fw.fw_format);
1654
1655 rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false);
1656 }
1657
rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table * tbl)1658 static void rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table *tbl)
1659 {
1660 if (!tbl)
1661 return;
1662
1663 kfree(tbl->regs);
1664 kfree(tbl);
1665 }
1666
rtw89_unload_firmware_elements(struct rtw89_dev * rtwdev)1667 static void rtw89_unload_firmware_elements(struct rtw89_dev *rtwdev)
1668 {
1669 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1670 int i;
1671
1672 rtw89_free_phy_tbl_from_elm(elm_info->bb_tbl);
1673 rtw89_free_phy_tbl_from_elm(elm_info->bb_gain);
1674 for (i = 0; i < ARRAY_SIZE(elm_info->rf_radio); i++)
1675 rtw89_free_phy_tbl_from_elm(elm_info->rf_radio[i]);
1676 rtw89_free_phy_tbl_from_elm(elm_info->rf_nctl);
1677
1678 kfree(elm_info->txpwr_trk);
1679 kfree(elm_info->rfk_log_fmt);
1680 }
1681
rtw89_unload_firmware(struct rtw89_dev * rtwdev)1682 void rtw89_unload_firmware(struct rtw89_dev *rtwdev)
1683 {
1684 struct rtw89_fw_info *fw = &rtwdev->fw;
1685
1686 cancel_work_sync(&rtwdev->load_firmware_work);
1687
1688 if (fw->req.firmware) {
1689 release_firmware(fw->req.firmware);
1690
1691 /* assign NULL back in case rtw89_free_ieee80211_hw()
1692 * try to release the same one again.
1693 */
1694 fw->req.firmware = NULL;
1695 }
1696
1697 kfree(fw->log.fmts);
1698 rtw89_unload_firmware_elements(rtwdev);
1699 }
1700
rtw89_fw_log_get_fmt_idx(struct rtw89_dev * rtwdev,u32 fmt_id)1701 static u32 rtw89_fw_log_get_fmt_idx(struct rtw89_dev *rtwdev, u32 fmt_id)
1702 {
1703 struct rtw89_fw_log *fw_log = &rtwdev->fw.log;
1704 u32 i;
1705
1706 if (fmt_id > fw_log->last_fmt_id)
1707 return 0;
1708
1709 for (i = 0; i < fw_log->fmt_count; i++) {
1710 if (le32_to_cpu(fw_log->fmt_ids[i]) == fmt_id)
1711 return i;
1712 }
1713 return 0;
1714 }
1715
rtw89_fw_log_create_fmts_dict(struct rtw89_dev * rtwdev)1716 static int rtw89_fw_log_create_fmts_dict(struct rtw89_dev *rtwdev)
1717 {
1718 struct rtw89_fw_log *log = &rtwdev->fw.log;
1719 const struct rtw89_fw_logsuit_hdr *suit_hdr;
1720 struct rtw89_fw_suit *suit = &log->suit;
1721 const void *fmts_ptr, *fmts_end_ptr;
1722 u32 fmt_count;
1723 int i;
1724
1725 suit_hdr = (const struct rtw89_fw_logsuit_hdr *)suit->data;
1726 fmt_count = le32_to_cpu(suit_hdr->count);
1727 log->fmt_ids = suit_hdr->ids;
1728 fmts_ptr = &suit_hdr->ids[fmt_count];
1729 fmts_end_ptr = suit->data + suit->size;
1730 log->fmts = kcalloc(fmt_count, sizeof(char *), GFP_KERNEL);
1731 if (!log->fmts)
1732 return -ENOMEM;
1733
1734 for (i = 0; i < fmt_count; i++) {
1735 fmts_ptr = memchr_inv(fmts_ptr, 0, fmts_end_ptr - fmts_ptr);
1736 if (!fmts_ptr)
1737 break;
1738
1739 (*log->fmts)[i] = fmts_ptr;
1740 log->last_fmt_id = le32_to_cpu(log->fmt_ids[i]);
1741 log->fmt_count++;
1742 fmts_ptr += strlen(fmts_ptr);
1743 }
1744
1745 return 0;
1746 }
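
/* Layout of the log-format suit parsed above, as implied by this parser
 * (the blob is a firmware-provided element, not a documented ABI): a
 * struct rtw89_fw_logsuit_hdr carrying a __le32 count, followed by
 * 'count' __le32 format IDs, followed by the matching printf-style
 * format strings packed back to back with NUL padding in between;
 * memchr_inv() skips that padding to locate the start of each string.
 */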
1747
1748 int rtw89_fw_log_prepare(struct rtw89_dev *rtwdev)
1749 {
1750 struct rtw89_fw_log *log = &rtwdev->fw.log;
1751 struct rtw89_fw_suit *suit = &log->suit;
1752
1753 if (!suit || !suit->data) {
1754 rtw89_debug(rtwdev, RTW89_DBG_FW, "no log format file\n");
1755 return -EINVAL;
1756 }
1757 if (log->fmts)
1758 return 0;
1759
1760 return rtw89_fw_log_create_fmts_dict(rtwdev);
1761 }
1762
1763 static void rtw89_fw_log_dump_data(struct rtw89_dev *rtwdev,
1764 const struct rtw89_fw_c2h_log_fmt *log_fmt,
1765 u32 fmt_idx, u8 para_int, bool raw_data)
1766 {
1767 const char *(*fmts)[] = rtwdev->fw.log.fmts;
1768 char str_buf[RTW89_C2H_FW_LOG_STR_BUF_SIZE];
1769 u32 args[RTW89_C2H_FW_LOG_MAX_PARA_NUM] = {0};
1770 int i;
1771
1772 if (log_fmt->argc > RTW89_C2H_FW_LOG_MAX_PARA_NUM) {
1773 rtw89_warn(rtwdev, "C2H log: Arg count is unexpected %d\n",
1774 log_fmt->argc);
1775 return;
1776 }
1777
1778 if (para_int)
1779 for (i = 0 ; i < log_fmt->argc; i++)
1780 args[i] = le32_to_cpu(log_fmt->u.argv[i]);
1781
1782 if (raw_data) {
1783 if (para_int)
1784 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE,
1785 "fw_enc(%d, %d, %d) %*ph", le32_to_cpu(log_fmt->fmt_id),
1786 para_int, log_fmt->argc, (int)sizeof(args), args);
1787 else
1788 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE,
1789 "fw_enc(%d, %d, %d, %s)", le32_to_cpu(log_fmt->fmt_id),
1790 para_int, log_fmt->argc, log_fmt->u.raw);
1791 } else {
1792 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, (*fmts)[fmt_idx],
1793 args[0x0], args[0x1], args[0x2], args[0x3], args[0x4],
1794 args[0x5], args[0x6], args[0x7], args[0x8], args[0x9],
1795 args[0xa], args[0xb], args[0xc], args[0xd], args[0xe],
1796 args[0xf]);
1797 }
1798
1799 rtw89_info(rtwdev, "C2H log: %s", str_buf);
1800 }
1801
1802 void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
1803 {
1804 const struct rtw89_fw_c2h_log_fmt *log_fmt;
1805 u8 para_int;
1806 u32 fmt_idx;
1807
1808 if (len < RTW89_C2H_HEADER_LEN) {
1809 rtw89_err(rtwdev, "c2h log length is wrong!\n");
1810 return;
1811 }
1812
1813 buf += RTW89_C2H_HEADER_LEN;
1814 len -= RTW89_C2H_HEADER_LEN;
1815 log_fmt = (const struct rtw89_fw_c2h_log_fmt *)buf;
1816
1817 if (len < RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN)
1818 goto plain_log;
1819
1820 if (log_fmt->signature != cpu_to_le16(RTW89_C2H_FW_LOG_SIGNATURE))
1821 goto plain_log;
1822
1823 if (!rtwdev->fw.log.fmts)
1824 return;
1825
1826 para_int = u8_get_bits(log_fmt->feature, RTW89_C2H_FW_LOG_FEATURE_PARA_INT);
1827 fmt_idx = rtw89_fw_log_get_fmt_idx(rtwdev, le32_to_cpu(log_fmt->fmt_id));
1828
1829 if (!para_int && log_fmt->argc != 0 && fmt_idx != 0)
1830 rtw89_info(rtwdev, "C2H log: %s%s",
1831 (*rtwdev->fw.log.fmts)[fmt_idx], log_fmt->u.raw);
1832 else if (fmt_idx != 0 && para_int)
1833 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, false);
1834 else
1835 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, true);
1836 return;
1837
1838 plain_log:
1839 rtw89_info(rtwdev, "C2H log: %.*s", len, buf);
1840
1841 }
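
/* Dispatch summary for the C2H log handler above: payloads shorter than
 * RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN or without RTW89_C2H_FW_LOG_SIGNATURE
 * are printed verbatim (plain_log); formatted entries whose fmt_id
 * resolves through the dictionary are rendered with the matching format
 * string; any other combination falls back to the raw "fw_enc(...)" dump
 * in rtw89_fw_log_dump_data().
 */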
1842
1843 #define H2C_CAM_LEN 60
1844 int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
1845 struct rtw89_sta_link *rtwsta_link, const u8 *scan_mac_addr)
1846 {
1847 struct sk_buff *skb;
1848 int ret;
1849
1850 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN);
1851 if (!skb) {
1852 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
1853 return -ENOMEM;
1854 }
1855 skb_put(skb, H2C_CAM_LEN);
1856 rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif_link, rtwsta_link, scan_mac_addr,
1857 skb->data);
1858 rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif_link, rtwsta_link, skb->data);
1859
1860 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1861 H2C_CAT_MAC,
1862 H2C_CL_MAC_ADDR_CAM_UPDATE,
1863 H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1,
1864 H2C_CAM_LEN);
1865
1866 ret = rtw89_h2c_tx(rtwdev, skb, false);
1867 if (ret) {
1868 rtw89_err(rtwdev, "failed to send h2c\n");
1869 goto fail;
1870 }
1871
1872 return 0;
1873 fail:
1874 dev_kfree_skb_any(skb);
1875
1876 return ret;
1877 }
1878
1879 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
1880 struct rtw89_vif_link *rtwvif_link,
1881 struct rtw89_sta_link *rtwsta_link)
1882 {
1883 struct rtw89_h2c_dctlinfo_ud_v1 *h2c;
1884 u32 len = sizeof(*h2c);
1885 struct sk_buff *skb;
1886 int ret;
1887
1888 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
1889 if (!skb) {
1890 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
1891 return -ENOMEM;
1892 }
1893 skb_put(skb, len);
1894 h2c = (struct rtw89_h2c_dctlinfo_ud_v1 *)skb->data;
1895
1896 rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif_link, rtwsta_link, h2c);
1897
1898 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1899 H2C_CAT_MAC,
1900 H2C_CL_MAC_FR_EXCHG,
1901 H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0,
1902 len);
1903
1904 ret = rtw89_h2c_tx(rtwdev, skb, false);
1905 if (ret) {
1906 rtw89_err(rtwdev, "failed to send h2c\n");
1907 goto fail;
1908 }
1909
1910 return 0;
1911 fail:
1912 dev_kfree_skb_any(skb);
1913
1914 return ret;
1915 }
1916 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);
1917
1918 int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev,
1919 struct rtw89_vif_link *rtwvif_link,
1920 struct rtw89_sta_link *rtwsta_link)
1921 {
1922 struct rtw89_h2c_dctlinfo_ud_v2 *h2c;
1923 u32 len = sizeof(*h2c);
1924 struct sk_buff *skb;
1925 int ret;
1926
1927 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
1928 if (!skb) {
1929 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
1930 return -ENOMEM;
1931 }
1932 skb_put(skb, len);
1933 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data;
1934
1935 rtw89_cam_fill_dctl_sec_cam_info_v2(rtwdev, rtwvif_link, rtwsta_link, h2c);
1936
1937 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1938 H2C_CAT_MAC,
1939 H2C_CL_MAC_FR_EXCHG,
1940 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0,
1941 len);
1942
1943 ret = rtw89_h2c_tx(rtwdev, skb, false);
1944 if (ret) {
1945 rtw89_err(rtwdev, "failed to send h2c\n");
1946 goto fail;
1947 }
1948
1949 return 0;
1950 fail:
1951 dev_kfree_skb_any(skb);
1952
1953 return ret;
1954 }
1955 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v2);
1956
1957 int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev,
1958 struct rtw89_vif_link *rtwvif_link,
1959 struct rtw89_sta_link *rtwsta_link)
1960 {
1961 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
1962 struct rtw89_h2c_dctlinfo_ud_v2 *h2c;
1963 u32 len = sizeof(*h2c);
1964 struct sk_buff *skb;
1965 int ret;
1966
1967 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
1968 if (!skb) {
1969 rtw89_err(rtwdev, "failed to alloc skb for dctl v2\n");
1970 return -ENOMEM;
1971 }
1972 skb_put(skb, len);
1973 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data;
1974
1975 h2c->c0 = le32_encode_bits(mac_id, DCTLINFO_V2_C0_MACID) |
1976 le32_encode_bits(1, DCTLINFO_V2_C0_OP);
1977
1978 h2c->m0 = cpu_to_le32(DCTLINFO_V2_W0_ALL);
1979 h2c->m1 = cpu_to_le32(DCTLINFO_V2_W1_ALL);
1980 h2c->m2 = cpu_to_le32(DCTLINFO_V2_W2_ALL);
1981 h2c->m3 = cpu_to_le32(DCTLINFO_V2_W3_ALL);
1982 h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_ALL);
1983 h2c->m5 = cpu_to_le32(DCTLINFO_V2_W5_ALL);
1984 h2c->m6 = cpu_to_le32(DCTLINFO_V2_W6_ALL);
1985 h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_ALL);
1986 h2c->m8 = cpu_to_le32(DCTLINFO_V2_W8_ALL);
1987 h2c->m9 = cpu_to_le32(DCTLINFO_V2_W9_ALL);
1988 h2c->m10 = cpu_to_le32(DCTLINFO_V2_W10_ALL);
1989 h2c->m11 = cpu_to_le32(DCTLINFO_V2_W11_ALL);
1990 h2c->m12 = cpu_to_le32(DCTLINFO_V2_W12_ALL);
1991
1992 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1993 H2C_CAT_MAC,
1994 H2C_CL_MAC_FR_EXCHG,
1995 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0,
1996 len);
1997
1998 ret = rtw89_h2c_tx(rtwdev, skb, false);
1999 if (ret) {
2000 rtw89_err(rtwdev, "failed to send h2c\n");
2001 goto fail;
2002 }
2003
2004 return 0;
2005 fail:
2006 dev_kfree_skb_any(skb);
2007
2008 return ret;
2009 }
2010 EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v2);
2011
2012 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev,
2013 struct rtw89_vif_link *rtwvif_link,
2014 struct rtw89_sta_link *rtwsta_link,
2015 bool valid, struct ieee80211_ampdu_params *params)
2016 {
2017 const struct rtw89_chip_info *chip = rtwdev->chip;
2018 struct rtw89_h2c_ba_cam *h2c;
2019 u8 macid = rtwsta_link->mac_id;
2020 u32 len = sizeof(*h2c);
2021 struct sk_buff *skb;
2022 u8 entry_idx;
2023 int ret;
2024
2025 ret = valid ?
2026 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid,
2027 &entry_idx) :
2028 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid,
2029 &entry_idx);
2030 if (ret) {
2031 /* This still works even without a static BA CAM entry, because the
2032 * hardware can create a dynamic BA CAM entry automatically.
2033 */
2034 rtw89_debug(rtwdev, RTW89_DBG_TXRX,
2035 "failed to %s entry tid=%d for h2c ba cam\n",
2036 valid ? "alloc" : "free", params->tid);
2037 return 0;
2038 }
2039
2040 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2041 if (!skb) {
2042 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
2043 return -ENOMEM;
2044 }
2045 skb_put(skb, len);
2046 h2c = (struct rtw89_h2c_ba_cam *)skb->data;
2047
2048 h2c->w0 = le32_encode_bits(macid, RTW89_H2C_BA_CAM_W0_MACID);
2049 if (chip->bacam_ver == RTW89_BACAM_V0_EXT)
2050 h2c->w1 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1);
2051 else
2052 h2c->w0 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W0_ENTRY_IDX);
2053 if (!valid)
2054 goto end;
2055 h2c->w0 |= le32_encode_bits(valid, RTW89_H2C_BA_CAM_W0_VALID) |
2056 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_W0_TID);
2057 if (params->buf_size > 64)
2058 h2c->w0 |= le32_encode_bits(4, RTW89_H2C_BA_CAM_W0_BMAP_SIZE);
2059 else
2060 h2c->w0 |= le32_encode_bits(0, RTW89_H2C_BA_CAM_W0_BMAP_SIZE);
2061 /* If init req is set, hw will set the ssn */
2062 h2c->w0 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_INIT_REQ) |
2063 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_W0_SSN);
2064
2065 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) {
2066 h2c->w1 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W1_STD_EN) |
2067 le32_encode_bits(rtwvif_link->mac_idx,
2068 RTW89_H2C_BA_CAM_W1_BAND);
2069 }
2070
2071 end:
2072 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2073 H2C_CAT_MAC,
2074 H2C_CL_BA_CAM,
2075 H2C_FUNC_MAC_BA_CAM, 0, 1,
2076 len);
2077
2078 ret = rtw89_h2c_tx(rtwdev, skb, false);
2079 if (ret) {
2080 rtw89_err(rtwdev, "failed to send h2c\n");
2081 goto fail;
2082 }
2083
2084 return 0;
2085 fail:
2086 dev_kfree_skb_any(skb);
2087
2088 return ret;
2089 }
2090 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam);
2091
2092 static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev,
2093 u8 entry_idx, u8 uid)
2094 {
2095 struct rtw89_h2c_ba_cam *h2c;
2096 u32 len = sizeof(*h2c);
2097 struct sk_buff *skb;
2098 int ret;
2099
2100 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2101 if (!skb) {
2102 rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n");
2103 return -ENOMEM;
2104 }
2105 skb_put(skb, len);
2106 h2c = (struct rtw89_h2c_ba_cam *)skb->data;
2107
2108 h2c->w0 = le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_VALID);
2109 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1) |
2110 le32_encode_bits(uid, RTW89_H2C_BA_CAM_W1_UID) |
2111 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_BAND) |
2112 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_STD_EN);
2113
2114 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2115 H2C_CAT_MAC,
2116 H2C_CL_BA_CAM,
2117 H2C_FUNC_MAC_BA_CAM, 0, 1,
2118 len);
2119
2120 ret = rtw89_h2c_tx(rtwdev, skb, false);
2121 if (ret) {
2122 rtw89_err(rtwdev, "failed to send h2c\n");
2123 goto fail;
2124 }
2125
2126 return 0;
2127 fail:
2128 dev_kfree_skb_any(skb);
2129
2130 return ret;
2131 }
2132
2133 void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev)
2134 {
2135 const struct rtw89_chip_info *chip = rtwdev->chip;
2136 u8 entry_idx = chip->bacam_num;
2137 u8 uid = 0;
2138 int i;
2139
2140 for (i = 0; i < chip->bacam_dynamic_num; i++) {
2141 rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid);
2142 entry_idx++;
2143 uid++;
2144 }
2145 }
2146
2147 int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev,
2148 struct rtw89_vif_link *rtwvif_link,
2149 struct rtw89_sta_link *rtwsta_link,
2150 bool valid, struct ieee80211_ampdu_params *params)
2151 {
2152 const struct rtw89_chip_info *chip = rtwdev->chip;
2153 struct rtw89_h2c_ba_cam_v1 *h2c;
2154 u8 macid = rtwsta_link->mac_id;
2155 u32 len = sizeof(*h2c);
2156 struct sk_buff *skb;
2157 u8 entry_idx;
2158 u8 bmap_size;
2159 int ret;
2160
2161 ret = valid ?
2162 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid,
2163 &entry_idx) :
2164 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid,
2165 &entry_idx);
2166 if (ret) {
2167 /* This still works even without a static BA CAM entry, because the
2168 * hardware can create a dynamic BA CAM entry automatically.
2169 */
2170 rtw89_debug(rtwdev, RTW89_DBG_TXRX,
2171 "failed to %s entry tid=%d for h2c ba cam\n",
2172 valid ? "alloc" : "free", params->tid);
2173 return 0;
2174 }
2175
2176 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2177 if (!skb) {
2178 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
2179 return -ENOMEM;
2180 }
2181 skb_put(skb, len);
2182 h2c = (struct rtw89_h2c_ba_cam_v1 *)skb->data;
2183
2184 if (params->buf_size > 512)
2185 bmap_size = 10;
2186 else if (params->buf_size > 256)
2187 bmap_size = 8;
2188 else if (params->buf_size > 64)
2189 bmap_size = 4;
2190 else
2191 bmap_size = 0;
2192
2193 h2c->w0 = le32_encode_bits(valid, RTW89_H2C_BA_CAM_V1_W0_VALID) |
2194 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W0_INIT_REQ) |
2195 le32_encode_bits(macid, RTW89_H2C_BA_CAM_V1_W0_MACID_MASK) |
2196 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_V1_W0_TID_MASK) |
2197 le32_encode_bits(bmap_size, RTW89_H2C_BA_CAM_V1_W0_BMAP_SIZE_MASK) |
2198 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_V1_W0_SSN_MASK);
2199
2200 entry_idx += chip->bacam_dynamic_num; /* std entry right after dynamic ones */
2201 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK) |
2202 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN) |
2203 le32_encode_bits(!!rtwvif_link->mac_idx,
2204 RTW89_H2C_BA_CAM_V1_W1_BAND_SEL);
2205
2206 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2207 H2C_CAT_MAC,
2208 H2C_CL_BA_CAM,
2209 H2C_FUNC_MAC_BA_CAM_V1, 0, 1,
2210 len);
2211
2212 ret = rtw89_h2c_tx(rtwdev, skb, false);
2213 if (ret) {
2214 rtw89_err(rtwdev, "failed to send h2c\n");
2215 goto fail;
2216 }
2217
2218 return 0;
2219 fail:
2220 dev_kfree_skb_any(skb);
2221
2222 return ret;
2223 }
2224 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam_v1);
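
/* BMAP_SIZE encoding used by the two BA CAM helpers above, taken directly
 * from the buf_size checks in the code (the firmware-side meaning of the
 * index is not restated here):
 *
 *	buf_size <= 64   -> 0
 *	buf_size <= 256  -> 4
 *	buf_size <= 512  -> 8
 *	buf_size >  512  -> 10
 *
 * The pre-V1 rtw89_fw_h2c_ba_cam() only distinguishes <= 64 (0) from
 * > 64 (4).
 */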
2225
2226 int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users,
2227 u8 offset, u8 mac_idx)
2228 {
2229 struct rtw89_h2c_ba_cam_init *h2c;
2230 u32 len = sizeof(*h2c);
2231 struct sk_buff *skb;
2232 int ret;
2233
2234 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2235 if (!skb) {
2236 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam init\n");
2237 return -ENOMEM;
2238 }
2239 skb_put(skb, len);
2240 h2c = (struct rtw89_h2c_ba_cam_init *)skb->data;
2241
2242 h2c->w0 = le32_encode_bits(users, RTW89_H2C_BA_CAM_INIT_USERS_MASK) |
2243 le32_encode_bits(offset, RTW89_H2C_BA_CAM_INIT_OFFSET_MASK) |
2244 le32_encode_bits(mac_idx, RTW89_H2C_BA_CAM_INIT_BAND_SEL);
2245
2246 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2247 H2C_CAT_MAC,
2248 H2C_CL_BA_CAM,
2249 H2C_FUNC_MAC_BA_CAM_INIT, 0, 1,
2250 len);
2251
2252 ret = rtw89_h2c_tx(rtwdev, skb, false);
2253 if (ret) {
2254 rtw89_err(rtwdev, "failed to send h2c\n");
2255 goto fail;
2256 }
2257
2258 return 0;
2259 fail:
2260 dev_kfree_skb_any(skb);
2261
2262 return ret;
2263 }
2264
2265 #define H2C_LOG_CFG_LEN 12
2266 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
2267 {
2268 struct sk_buff *skb;
2269 u32 comp = 0;
2270 int ret;
2271
2272 if (enable)
2273 comp = BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
2274 BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) |
2275 BIT(RTW89_FW_LOG_COMP_SCAN);
2276
2277 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
2278 if (!skb) {
2279 rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n");
2280 return -ENOMEM;
2281 }
2282
2283 skb_put(skb, H2C_LOG_CFG_LEN);
2284 SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_LOUD);
2285 SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H));
2286 SET_LOG_CFG_COMP(skb->data, comp);
2287 SET_LOG_CFG_COMP_EXT(skb->data, 0);
2288
2289 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2290 H2C_CAT_MAC,
2291 H2C_CL_FW_INFO,
2292 H2C_FUNC_LOG_CFG, 0, 0,
2293 H2C_LOG_CFG_LEN);
2294
2295 ret = rtw89_h2c_tx(rtwdev, skb, false);
2296 if (ret) {
2297 rtw89_err(rtwdev, "failed to send h2c\n");
2298 goto fail;
2299 }
2300
2301 return 0;
2302 fail:
2303 dev_kfree_skb_any(skb);
2304
2305 return ret;
2306 }
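
/* A minimal usage sketch (illustrative only, not a fixed call sequence):
 * a debug path would typically make sure the format dictionary exists
 * before enabling the firmware log, so rtw89_fw_log_dump() can decode the
 * resulting C2H entries:
 *
 *	if (!rtw89_fw_log_prepare(rtwdev))
 *		rtw89_fw_h2c_fw_log(rtwdev, true);
 *
 * Passing enable == false sends a zero component mask, which effectively
 * mutes the firmware log output.
 */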
2307
2308 static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev,
2309 struct rtw89_vif_link *rtwvif_link)
2310 {
2311 static const u8 gtkbody[] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x88,
2312 0x8E, 0x01, 0x03, 0x00, 0x5F, 0x02, 0x03};
2313 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev);
2314 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
2315 struct rtw89_eapol_2_of_2 *eapol_pkt;
2316 struct ieee80211_bss_conf *bss_conf;
2317 struct ieee80211_hdr_3addr *hdr;
2318 struct sk_buff *skb;
2319 u8 key_des_ver;
2320
2321 if (rtw_wow->ptk_alg == 3)
2322 key_des_ver = 1;
2323 else if (rtw_wow->akm == 1 || rtw_wow->akm == 2)
2324 key_des_ver = 2;
2325 else if (rtw_wow->akm > 2 && rtw_wow->akm < 7)
2326 key_des_ver = 3;
2327 else
2328 key_des_ver = 0;
2329
2330 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*eapol_pkt));
2331 if (!skb)
2332 return NULL;
2333
2334 hdr = skb_put_zero(skb, sizeof(*hdr));
2335 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
2336 IEEE80211_FCTL_TODS |
2337 IEEE80211_FCTL_PROTECTED);
2338
2339 rcu_read_lock();
2340
2341 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
2342
2343 ether_addr_copy(hdr->addr1, bss_conf->bssid);
2344 ether_addr_copy(hdr->addr2, bss_conf->addr);
2345 ether_addr_copy(hdr->addr3, bss_conf->bssid);
2346
2347 rcu_read_unlock();
2348
2349 skb_put_zero(skb, sec_hdr_len);
2350
2351 eapol_pkt = skb_put_zero(skb, sizeof(*eapol_pkt));
2352 memcpy(eapol_pkt->gtkbody, gtkbody, sizeof(gtkbody));
2353 eapol_pkt->key_des_ver = key_des_ver;
2354
2355 return skb;
2356 }
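
/* Key descriptor version selection in rtw89_eapol_get(), restating the
 * checks above (the ptk_alg/akm encodings themselves come from the WoWLAN
 * setup and are not documented here):
 *
 *	ptk_alg == 3      -> key_des_ver 1
 *	akm == 1 or 2     -> key_des_ver 2
 *	2 < akm < 7       -> key_des_ver 3
 *	otherwise         -> key_des_ver 0
 */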
2357
2358 static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev,
2359 struct rtw89_vif_link *rtwvif_link)
2360 {
2361 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev);
2362 struct ieee80211_bss_conf *bss_conf;
2363 struct ieee80211_hdr_3addr *hdr;
2364 struct rtw89_sa_query *sa_query;
2365 struct sk_buff *skb;
2366
2367 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*sa_query));
2368 if (!skb)
2369 return NULL;
2370
2371 hdr = skb_put_zero(skb, sizeof(*hdr));
2372 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2373 IEEE80211_STYPE_ACTION |
2374 IEEE80211_FCTL_PROTECTED);
2375
2376 rcu_read_lock();
2377
2378 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
2379
2380 ether_addr_copy(hdr->addr1, bss_conf->bssid);
2381 ether_addr_copy(hdr->addr2, bss_conf->addr);
2382 ether_addr_copy(hdr->addr3, bss_conf->bssid);
2383
2384 rcu_read_unlock();
2385
2386 skb_put_zero(skb, sec_hdr_len);
2387
2388 sa_query = skb_put_zero(skb, sizeof(*sa_query));
2389 sa_query->category = WLAN_CATEGORY_SA_QUERY;
2390 sa_query->action = WLAN_ACTION_SA_QUERY_RESPONSE;
2391
2392 return skb;
2393 }
2394
2395 static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev,
2396 struct rtw89_vif_link *rtwvif_link)
2397 {
2398 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
2399 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev);
2400 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
2401 struct ieee80211_hdr_3addr *hdr;
2402 struct rtw89_arp_rsp *arp_skb;
2403 struct arphdr *arp_hdr;
2404 struct sk_buff *skb;
2405 __le16 fc;
2406
2407 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*arp_skb));
2408 if (!skb)
2409 return NULL;
2410
2411 hdr = skb_put_zero(skb, sizeof(*hdr));
2412
2413 if (rtw_wow->ptk_alg)
2414 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS |
2415 IEEE80211_FCTL_PROTECTED);
2416 else
2417 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS);
2418
2419 hdr->frame_control = fc;
2420 ether_addr_copy(hdr->addr1, rtwvif_link->bssid);
2421 ether_addr_copy(hdr->addr2, rtwvif_link->mac_addr);
2422 ether_addr_copy(hdr->addr3, rtwvif_link->bssid);
2423
2424 skb_put_zero(skb, sec_hdr_len);
2425
2426 arp_skb = skb_put_zero(skb, sizeof(*arp_skb));
2427 memcpy(arp_skb->llc_hdr, rfc1042_header, sizeof(rfc1042_header));
2428 arp_skb->llc_type = htons(ETH_P_ARP);
2429
2430 arp_hdr = &arp_skb->arp_hdr;
2431 arp_hdr->ar_hrd = htons(ARPHRD_ETHER);
2432 arp_hdr->ar_pro = htons(ETH_P_IP);
2433 arp_hdr->ar_hln = ETH_ALEN;
2434 arp_hdr->ar_pln = 4;
2435 arp_hdr->ar_op = htons(ARPOP_REPLY);
2436
2437 ether_addr_copy(arp_skb->sender_hw, rtwvif_link->mac_addr);
2438 arp_skb->sender_ip = rtwvif->ip_addr;
2439
2440 return skb;
2441 }
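
/* Template built above, front to back: an 802.11 data header addressed to
 * the BSSID, sec_hdr_len zeroed bytes reserved for the security header,
 * an RFC 1042 LLC/SNAP header with ethertype ETH_P_ARP, and an ARP reply
 * carrying the local MAC plus the IPv4 address cached in rtwvif->ip_addr.
 * The target fields are left zero; presumably the firmware fills them from
 * the ARP request it answers on the host's behalf.
 */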
2442
2443 static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
2444 struct rtw89_vif_link *rtwvif_link,
2445 enum rtw89_fw_pkt_ofld_type type,
2446 u8 *id)
2447 {
2448 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
2449 int link_id = ieee80211_vif_is_mld(vif) ? rtwvif_link->link_id : -1;
2450 struct rtw89_pktofld_info *info;
2451 struct sk_buff *skb;
2452 int ret;
2453
2454 info = kzalloc(sizeof(*info), GFP_KERNEL);
2455 if (!info)
2456 return -ENOMEM;
2457
2458 switch (type) {
2459 case RTW89_PKT_OFLD_TYPE_PS_POLL:
2460 skb = ieee80211_pspoll_get(rtwdev->hw, vif);
2461 break;
2462 case RTW89_PKT_OFLD_TYPE_PROBE_RSP:
2463 skb = ieee80211_proberesp_get(rtwdev->hw, vif);
2464 break;
2465 case RTW89_PKT_OFLD_TYPE_NULL_DATA:
2466 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, false);
2467 break;
2468 case RTW89_PKT_OFLD_TYPE_QOS_NULL:
2469 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, true);
2470 break;
2471 case RTW89_PKT_OFLD_TYPE_EAPOL_KEY:
2472 skb = rtw89_eapol_get(rtwdev, rtwvif_link);
2473 break;
2474 case RTW89_PKT_OFLD_TYPE_SA_QUERY:
2475 skb = rtw89_sa_query_get(rtwdev, rtwvif_link);
2476 break;
2477 case RTW89_PKT_OFLD_TYPE_ARP_RSP:
2478 skb = rtw89_arp_response_get(rtwdev, rtwvif_link);
2479 break;
2480 default:
2481 goto err;
2482 }
2483
2484 if (!skb)
2485 goto err;
2486
2487 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
2488 kfree_skb(skb);
2489
2490 if (ret)
2491 goto err;
2492
2493 list_add_tail(&info->list, &rtwvif_link->general_pkt_list);
2494 *id = info->id;
2495 return 0;
2496
2497 err:
2498 kfree(info);
2499 return -ENOMEM;
2500 }
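
/* Typical caller pattern (see rtw89_fw_h2c_general_pkt() below for an
 * in-tree example): request one offloaded template and keep only the
 * returned packet-offload ID,
 *
 *	u8 id = 0xff;
 *
 *	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
 *				     RTW89_PKT_OFLD_TYPE_PS_POLL, &id);
 *
 * Successful templates are tracked on rtwvif_link->general_pkt_list and
 * are torn down via rtw89_fw_release_general_pkt_list_vif().
 */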
2501
2502 void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev,
2503 struct rtw89_vif_link *rtwvif_link,
2504 bool notify_fw)
2505 {
2506 struct list_head *pkt_list = &rtwvif_link->general_pkt_list;
2507 struct rtw89_pktofld_info *info, *tmp;
2508
2509 list_for_each_entry_safe(info, tmp, pkt_list, list) {
2510 if (notify_fw)
2511 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
2512 else
2513 rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id);
2514 list_del(&info->list);
2515 kfree(info);
2516 }
2517 }
2518
2519 void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw)
2520 {
2521 struct rtw89_vif_link *rtwvif_link;
2522 struct rtw89_vif *rtwvif;
2523 unsigned int link_id;
2524
2525 rtw89_for_each_rtwvif(rtwdev, rtwvif)
2526 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
2527 rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif_link,
2528 notify_fw);
2529 }
2530
2531 #define H2C_GENERAL_PKT_LEN 6
2532 #define H2C_GENERAL_PKT_ID_UND 0xff
2533 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev,
2534 struct rtw89_vif_link *rtwvif_link, u8 macid)
2535 {
2536 u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND;
2537 u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND;
2538 u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND;
2539 struct sk_buff *skb;
2540 int ret;
2541
2542 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
2543 RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll);
2544 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
2545 RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null);
2546 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
2547 RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null);
2548
2549 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN);
2550 if (!skb) {
2551 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
2552 return -ENOMEM;
2553 }
2554 skb_put(skb, H2C_GENERAL_PKT_LEN);
2555 SET_GENERAL_PKT_MACID(skb->data, macid);
2556 SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
2557 SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll);
2558 SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null);
2559 SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null);
2560 SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
2561
2562 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2563 H2C_CAT_MAC,
2564 H2C_CL_FW_INFO,
2565 H2C_FUNC_MAC_GENERAL_PKT, 0, 1,
2566 H2C_GENERAL_PKT_LEN);
2567
2568 ret = rtw89_h2c_tx(rtwdev, skb, false);
2569 if (ret) {
2570 rtw89_err(rtwdev, "failed to send h2c\n");
2571 goto fail;
2572 }
2573
2574 return 0;
2575 fail:
2576 dev_kfree_skb_any(skb);
2577
2578 return ret;
2579 }
2580
2581 #define H2C_LPS_PARM_LEN 8
2582 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
2583 struct rtw89_lps_parm *lps_param)
2584 {
2585 struct sk_buff *skb;
2586 int ret;
2587
2588 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN);
2589 if (!skb) {
2590 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
2591 return -ENOMEM;
2592 }
2593 skb_put(skb, H2C_LPS_PARM_LEN);
2594
2595 SET_LPS_PARM_MACID(skb->data, lps_param->macid);
2596 SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode);
2597 SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm);
2598 SET_LPS_PARM_RLBM(skb->data, 1);
2599 SET_LPS_PARM_SMARTPS(skb->data, 1);
2600 SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1);
2601 SET_LPS_PARM_VOUAPSD(skb->data, 0);
2602 SET_LPS_PARM_VIUAPSD(skb->data, 0);
2603 SET_LPS_PARM_BEUAPSD(skb->data, 0);
2604 SET_LPS_PARM_BKUAPSD(skb->data, 0);
2605
2606 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2607 H2C_CAT_MAC,
2608 H2C_CL_MAC_PS,
2609 H2C_FUNC_MAC_LPS_PARM, 0, !lps_param->psmode,
2610 H2C_LPS_PARM_LEN);
2611
2612 ret = rtw89_h2c_tx(rtwdev, skb, false);
2613 if (ret) {
2614 rtw89_err(rtwdev, "failed to send h2c\n");
2615 goto fail;
2616 }
2617
2618 return 0;
2619 fail:
2620 dev_kfree_skb_any(skb);
2621
2622 return ret;
2623 }
2624
2625 int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
2626 {
2627 const struct rtw89_chip_info *chip = rtwdev->chip;
2628 const struct rtw89_chan *chan;
2629 struct rtw89_vif_link *rtwvif_link;
2630 struct rtw89_h2c_lps_ch_info *h2c;
2631 u32 len = sizeof(*h2c);
2632 unsigned int link_id;
2633 struct sk_buff *skb;
2634 bool no_chan = true;
2635 u8 phy_idx;
2636 u32 done;
2637 int ret;
2638
2639 if (chip->chip_gen != RTW89_CHIP_BE)
2640 return 0;
2641
2642 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2643 if (!skb) {
2644 rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ch_info\n");
2645 return -ENOMEM;
2646 }
2647 skb_put(skb, len);
2648 h2c = (struct rtw89_h2c_lps_ch_info *)skb->data;
2649
2650 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
2651 phy_idx = rtwvif_link->phy_idx;
2652 if (phy_idx >= ARRAY_SIZE(h2c->info))
2653 continue;
2654
2655 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
2656 no_chan = false;
2657
2658 h2c->info[phy_idx].central_ch = chan->channel;
2659 h2c->info[phy_idx].pri_ch = chan->primary_channel;
2660 h2c->info[phy_idx].band = chan->band_type;
2661 h2c->info[phy_idx].bw = chan->band_width;
2662 }
2663
2664 if (no_chan) {
2665 rtw89_err(rtwdev, "no chan for h2c lps_ch_info\n");
2666 ret = -ENOENT;
2667 goto fail;
2668 }
2669
2670 h2c->mlo_dbcc_mode_lps = cpu_to_le32(rtwdev->mlo_dbcc_mode);
2671
2672 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2673 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
2674 H2C_FUNC_FW_LPS_CH_INFO, 0, 0, len);
2675
2676 rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0);
2677 ret = rtw89_h2c_tx(rtwdev, skb, false);
2678 if (ret) {
2679 rtw89_err(rtwdev, "failed to send h2c\n");
2680 goto fail;
2681 }
2682
2683 ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000,
2684 true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT);
2685 if (ret)
2686 rtw89_warn(rtwdev, "h2c_lps_ch_info done polling timeout\n");
2687
2688 return 0;
2689 fail:
2690 dev_kfree_skb_any(skb);
2691
2692 return ret;
2693 }
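
/* Completion handshake used above (and by the ML common-info variant
 * below): B_CHK_LPS_STAT is cleared before the H2C is sent, then the same
 * field is polled until the firmware sets it, with a warning rather than
 * an error after the ~5 ms poll window expires.
 */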
2694
2695 int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev,
2696 struct rtw89_vif *rtwvif)
2697 {
2698 const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
2699 struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat;
2700 const struct rtw89_chip_info *chip = rtwdev->chip;
2701 struct rtw89_h2c_lps_ml_cmn_info *h2c;
2702 struct rtw89_vif_link *rtwvif_link;
2703 const struct rtw89_chan *chan;
2704 u8 bw_idx = RTW89_BB_BW_20_40;
2705 u32 len = sizeof(*h2c);
2706 unsigned int link_id;
2707 struct sk_buff *skb;
2708 u8 gain_band;
2709 u32 done;
2710 u8 path;
2711 int ret;
2712 int i;
2713
2714 if (chip->chip_gen != RTW89_CHIP_BE)
2715 return 0;
2716
2717 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2718 if (!skb) {
2719 rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ml_cmn_info\n");
2720 return -ENOMEM;
2721 }
2722 skb_put(skb, len);
2723 h2c = (struct rtw89_h2c_lps_ml_cmn_info *)skb->data;
2724
2725 h2c->fmt_id = 0x1;
2726
2727 h2c->mlo_dbcc_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
2728
2729 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
2730 path = rtwvif_link->phy_idx == RTW89_PHY_1 ? RF_PATH_B : RF_PATH_A;
2731 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
2732 gain_band = rtw89_subband_to_gain_band_be(chan->subband_type);
2733
2734 h2c->central_ch[rtwvif_link->phy_idx] = chan->channel;
2735 h2c->pri_ch[rtwvif_link->phy_idx] = chan->primary_channel;
2736 h2c->band[rtwvif_link->phy_idx] = chan->band_type;
2737 h2c->bw[rtwvif_link->phy_idx] = chan->band_width;
2738 if (pkt_stat->beacon_rate < RTW89_HW_RATE_OFDM6)
2739 h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x1;
2740 else
2741 h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x2;
2742
2743 /* Fill BW20 RX gain table for beacon mode */
2744 for (i = 0; i < TIA_GAIN_NUM; i++) {
2745 h2c->tia_gain[rtwvif_link->phy_idx][i] =
2746 cpu_to_le16(gain->tia_gain[gain_band][bw_idx][path][i]);
2747 }
2748 memcpy(h2c->lna_gain[rtwvif_link->phy_idx],
2749 gain->lna_gain[gain_band][bw_idx][path],
2750 LNA_GAIN_NUM);
2751 }
2752
2753 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2754 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
2755 H2C_FUNC_FW_LPS_ML_CMN_INFO, 0, 0, len);
2756
2757 rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0);
2758 ret = rtw89_h2c_tx(rtwdev, skb, false);
2759 if (ret) {
2760 rtw89_err(rtwdev, "failed to send h2c\n");
2761 goto fail;
2762 }
2763
2764 ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000,
2765 true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT);
2766 if (ret)
2767 rtw89_warn(rtwdev, "h2c_lps_ml_cmn_info done polling timeout\n");
2768
2769 return 0;
2770 fail:
2771 dev_kfree_skb_any(skb);
2772
2773 return ret;
2774 }
2775
2776 #define H2C_P2P_ACT_LEN 20
2777 int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev,
2778 struct rtw89_vif_link *rtwvif_link,
2779 struct ieee80211_bss_conf *bss_conf,
2780 struct ieee80211_p2p_noa_desc *desc,
2781 u8 act, u8 noa_id)
2782 {
2783 bool p2p_type_gc = rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
2784 u8 ctwindow_oppps = bss_conf->p2p_noa_attr.oppps_ctwindow;
2785 struct sk_buff *skb;
2786 u8 *cmd;
2787 int ret;
2788
2789 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN);
2790 if (!skb) {
2791 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
2792 return -ENOMEM;
2793 }
2794 skb_put(skb, H2C_P2P_ACT_LEN);
2795 cmd = skb->data;
2796
2797 RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif_link->mac_id);
2798 RTW89_SET_FWCMD_P2P_P2PID(cmd, 0);
2799 RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id);
2800 RTW89_SET_FWCMD_P2P_ACT(cmd, act);
2801 RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc);
2802 RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0);
2803 if (desc) {
2804 RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time);
2805 RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval);
2806 RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration);
2807 RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count);
2808 RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps);
2809 }
2810
2811 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2812 H2C_CAT_MAC, H2C_CL_MAC_PS,
2813 H2C_FUNC_P2P_ACT, 0, 0,
2814 H2C_P2P_ACT_LEN);
2815
2816 ret = rtw89_h2c_tx(rtwdev, skb, false);
2817 if (ret) {
2818 rtw89_err(rtwdev, "failed to send h2c\n");
2819 goto fail;
2820 }
2821
2822 return 0;
2823 fail:
2824 dev_kfree_skb_any(skb);
2825
2826 return ret;
2827 }
2828
2829 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev,
2830 struct sk_buff *skb)
2831 {
2832 const struct rtw89_chip_info *chip = rtwdev->chip;
2833 struct rtw89_hal *hal = &rtwdev->hal;
2834 u8 ntx_path;
2835 u8 map_b;
2836
2837 if (chip->rf_path_num == 1) {
2838 ntx_path = RF_A;
2839 map_b = 0;
2840 } else {
2841 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B;
2842 map_b = hal->antenna_tx == RF_AB ? 1 : 0;
2843 }
2844
2845 SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
2846 SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
2847 SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
2848 SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
2849 SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
2850 }
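
/* TX path selection above: single-RF-path chips are pinned to path A with
 * an empty path map, while multi-path chips honour hal->antenna_tx
 * (falling back to RF_B when it is unset) and only assert PATH_MAP_B when
 * both paths (RF_AB) are selected.
 */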
2851
2852 #define H2C_CMC_TBL_LEN 68
2853 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
2854 struct rtw89_vif_link *rtwvif_link,
2855 struct rtw89_sta_link *rtwsta_link)
2856 {
2857 const struct rtw89_chip_info *chip = rtwdev->chip;
2858 u8 macid = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
2859 struct sk_buff *skb;
2860 int ret;
2861
2862 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
2863 if (!skb) {
2864 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
2865 return -ENOMEM;
2866 }
2867 skb_put(skb, H2C_CMC_TBL_LEN);
2868 SET_CTRL_INFO_MACID(skb->data, macid);
2869 SET_CTRL_INFO_OPERATION(skb->data, 1);
2870 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
2871 SET_CMC_TBL_TXPWR_MODE(skb->data, 0);
2872 __rtw89_fw_h2c_set_tx_path(rtwdev, skb);
2873 SET_CMC_TBL_ANTSEL_A(skb->data, 0);
2874 SET_CMC_TBL_ANTSEL_B(skb->data, 0);
2875 SET_CMC_TBL_ANTSEL_C(skb->data, 0);
2876 SET_CMC_TBL_ANTSEL_D(skb->data, 0);
2877 }
2878 SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0);
2879 SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0);
2880 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE)
2881 SET_CMC_TBL_DATA_DCM(skb->data, 0);
2882
2883 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2884 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
2885 chip->h2c_cctl_func_id, 0, 1,
2886 H2C_CMC_TBL_LEN);
2887
2888 ret = rtw89_h2c_tx(rtwdev, skb, false);
2889 if (ret) {
2890 rtw89_err(rtwdev, "failed to send h2c\n");
2891 goto fail;
2892 }
2893
2894 return 0;
2895 fail:
2896 dev_kfree_skb_any(skb);
2897
2898 return ret;
2899 }
2900 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl);
2901
2902 int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev,
2903 struct rtw89_vif_link *rtwvif_link,
2904 struct rtw89_sta_link *rtwsta_link)
2905 {
2906 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
2907 struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
2908 u32 len = sizeof(*h2c);
2909 struct sk_buff *skb;
2910 int ret;
2911
2912 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2913 if (!skb) {
2914 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n");
2915 return -ENOMEM;
2916 }
2917 skb_put(skb, len);
2918 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
2919
2920 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) |
2921 le32_encode_bits(1, CCTLINFO_G7_C0_OP);
2922
2923 h2c->w0 = le32_encode_bits(4, CCTLINFO_G7_W0_DATARATE);
2924 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_ALL);
2925
2926 h2c->w1 = le32_encode_bits(4, CCTLINFO_G7_W1_DATA_RTY_LOWEST_RATE) |
2927 le32_encode_bits(0xa, CCTLINFO_G7_W1_RTSRATE) |
2928 le32_encode_bits(4, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
2929 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_ALL);
2930
2931 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_ALL);
2932
2933 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_ALL);
2934
2935 h2c->w4 = le32_encode_bits(0xFFFF, CCTLINFO_G7_W4_ACT_SUBCH_CBW);
2936 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ALL);
2937
2938 h2c->w5 = le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) |
2939 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) |
2940 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) |
2941 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) |
2942 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
2943 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_ALL);
2944
2945 h2c->w6 = le32_encode_bits(0xb, CCTLINFO_G7_W6_RESP_REF_RATE);
2946 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ALL);
2947
2948 h2c->w7 = le32_encode_bits(1, CCTLINFO_G7_W7_NC) |
2949 le32_encode_bits(1, CCTLINFO_G7_W7_NR) |
2950 le32_encode_bits(1, CCTLINFO_G7_W7_CB) |
2951 le32_encode_bits(0x1, CCTLINFO_G7_W7_CSI_PARA_EN) |
2952 le32_encode_bits(0xb, CCTLINFO_G7_W7_CSI_FIX_RATE);
2953 h2c->m7 = cpu_to_le32(CCTLINFO_G7_W7_ALL);
2954
2955 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_ALL);
2956
2957 h2c->w14 = le32_encode_bits(0, CCTLINFO_G7_W14_VO_CURR_RATE) |
2958 le32_encode_bits(0, CCTLINFO_G7_W14_VI_CURR_RATE) |
2959 le32_encode_bits(0, CCTLINFO_G7_W14_BE_CURR_RATE_L);
2960 h2c->m14 = cpu_to_le32(CCTLINFO_G7_W14_ALL);
2961
2962 h2c->w15 = le32_encode_bits(0, CCTLINFO_G7_W15_BE_CURR_RATE_H) |
2963 le32_encode_bits(0, CCTLINFO_G7_W15_BK_CURR_RATE) |
2964 le32_encode_bits(0, CCTLINFO_G7_W15_MGNT_CURR_RATE);
2965 h2c->m15 = cpu_to_le32(CCTLINFO_G7_W15_ALL);
2966
2967 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2968 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
2969 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
2970 len);
2971
2972 ret = rtw89_h2c_tx(rtwdev, skb, false);
2973 if (ret) {
2974 rtw89_err(rtwdev, "failed to send h2c\n");
2975 goto fail;
2976 }
2977
2978 return 0;
2979 fail:
2980 dev_kfree_skb_any(skb);
2981
2982 return ret;
2983 }
2984 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_g7);
2985
2986 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
2987 struct ieee80211_link_sta *link_sta,
2988 u8 *pads)
2989 {
2990 bool ppe_th;
2991 u8 ppe16, ppe8;
2992 u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1;
2993 u8 ppe_thres_hdr = link_sta->he_cap.ppe_thres[0];
2994 u8 ru_bitmap;
2995 u8 n, idx, sh;
2996 u16 ppe;
2997 int i;
2998
2999 ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
3000 link_sta->he_cap.he_cap_elem.phy_cap_info[6]);
3001 if (!ppe_th) {
3002 u8 pad;
3003
3004 pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK,
3005 link_sta->he_cap.he_cap_elem.phy_cap_info[9]);
3006
3007 for (i = 0; i < RTW89_PPE_BW_NUM; i++)
3008 pads[i] = pad;
3009
3010 return;
3011 }
3012
3013 ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr);
3014 n = hweight8(ru_bitmap);
3015 n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss;
3016
3017 for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
3018 if (!(ru_bitmap & BIT(i))) {
3019 pads[i] = 1;
3020 continue;
3021 }
3022
3023 idx = n >> 3;
3024 sh = n & 7;
3025 n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2;
3026
3027 ppe = le16_to_cpu(*((__le16 *)&link_sta->he_cap.ppe_thres[idx]));
3028 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
3029 sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
3030 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
3031
3032 if (ppe16 != 7 && ppe8 == 7)
3033 pads[i] = RTW89_PE_DURATION_16;
3034 else if (ppe8 != 7)
3035 pads[i] = RTW89_PE_DURATION_8;
3036 else
3037 pads[i] = RTW89_PE_DURATION_0;
3038 }
3039 }
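
/* Walk of the HE PPE Thresholds field above: the 7-bit header (3-bit NSTS
 * plus 4-bit RU index bitmask) is why n starts at 7, and the additional
 * offset skips the PPET16/PPET8 pairs of all lower NSS indices. For each
 * RU admitted by ru_bitmap, the pair is read at bit offset n; PPET16 != 7
 * with PPET8 == 7 maps to 16 us of nominal padding, PPET8 != 7 maps to
 * 8 us, and a (7, 7) pair means no padding, matching the
 * RTW89_PE_DURATION_* values written into pads[].
 */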
3040
3041 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
3042 struct rtw89_vif_link *rtwvif_link,
3043 struct rtw89_sta_link *rtwsta_link)
3044 {
3045 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
3046 const struct rtw89_chip_info *chip = rtwdev->chip;
3047 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
3048 rtwvif_link->chanctx_idx);
3049 struct ieee80211_link_sta *link_sta;
3050 struct sk_buff *skb;
3051 u8 pads[RTW89_PPE_BW_NUM];
3052 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
3053 u16 lowest_rate;
3054 int ret;
3055
3056 memset(pads, 0, sizeof(pads));
3057
3058 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
3059 if (!skb) {
3060 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
3061 return -ENOMEM;
3062 }
3063
3064 rcu_read_lock();
3065
3066 if (rtwsta_link)
3067 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
3068
3069 if (rtwsta_link && link_sta->he_cap.has_he)
3070 __get_sta_he_pkt_padding(rtwdev, link_sta, pads);
3071
3072 if (vif->p2p)
3073 lowest_rate = RTW89_HW_RATE_OFDM6;
3074 else if (chan->band_type == RTW89_BAND_2G)
3075 lowest_rate = RTW89_HW_RATE_CCK1;
3076 else
3077 lowest_rate = RTW89_HW_RATE_OFDM6;
3078
3079 skb_put(skb, H2C_CMC_TBL_LEN);
3080 SET_CTRL_INFO_MACID(skb->data, mac_id);
3081 SET_CTRL_INFO_OPERATION(skb->data, 1);
3082 SET_CMC_TBL_DISRTSFB(skb->data, 1);
3083 SET_CMC_TBL_DISDATAFB(skb->data, 1);
3084 SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate);
3085 SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0);
3086 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0);
3087 if (vif->type == NL80211_IFTYPE_STATION)
3088 SET_CMC_TBL_ULDL(skb->data, 1);
3089 else
3090 SET_CMC_TBL_ULDL(skb->data, 0);
3091 SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif_link->port);
3092 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) {
3093 SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
3094 SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
3095 SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
3096 SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
3097 } else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
3098 SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
3099 SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
3100 SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
3101 SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
3102 }
3103 if (rtwsta_link)
3104 SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data,
3105 link_sta->he_cap.has_he);
3106 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE)
3107 SET_CMC_TBL_DATA_DCM(skb->data, 0);
3108
3109 rcu_read_unlock();
3110
3111 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3112 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3113 chip->h2c_cctl_func_id, 0, 1,
3114 H2C_CMC_TBL_LEN);
3115
3116 ret = rtw89_h2c_tx(rtwdev, skb, false);
3117 if (ret) {
3118 rtw89_err(rtwdev, "failed to send h2c\n");
3119 goto fail;
3120 }
3121
3122 return 0;
3123 fail:
3124 dev_kfree_skb_any(skb);
3125
3126 return ret;
3127 }
3128 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl);
3129
3130 static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev,
3131 struct ieee80211_link_sta *link_sta,
3132 u8 *pads)
3133 {
3134 u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1;
3135 u16 ppe_thres_hdr;
3136 u8 ppe16, ppe8;
3137 u8 n, idx, sh;
3138 u8 ru_bitmap;
3139 bool ppe_th;
3140 u16 ppe;
3141 int i;
3142
3143 ppe_th = !!u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5],
3144 IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT);
3145 if (!ppe_th) {
3146 u8 pad;
3147
3148 pad = u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5],
3149 IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK);
3150
3151 for (i = 0; i < RTW89_PPE_BW_NUM; i++)
3152 pads[i] = pad;
3153
3154 return;
3155 }
3156
3157 ppe_thres_hdr = get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres);
3158 ru_bitmap = u16_get_bits(ppe_thres_hdr,
3159 IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
3160 n = hweight8(ru_bitmap);
3161 n = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE +
3162 (n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2) * nss;
3163
3164 for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
3165 if (!(ru_bitmap & BIT(i))) {
3166 pads[i] = 1;
3167 continue;
3168 }
3169
3170 idx = n >> 3;
3171 sh = n & 7;
3172 n += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2;
3173
3174 ppe = get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres + idx);
3175 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
3176 sh += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE;
3177 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
3178
3179 if (ppe16 != 7 && ppe8 == 7)
3180 pads[i] = RTW89_PE_DURATION_16_20;
3181 else if (ppe8 != 7)
3182 pads[i] = RTW89_PE_DURATION_8;
3183 else
3184 pads[i] = RTW89_PE_DURATION_0;
3185 }
3186 }
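
/* The EHT variant above mirrors the HE walk, but starts from the larger
 * EHT header size (IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE), uses the
 * EHT PPET field width, and reports RTW89_PE_DURATION_16_20 instead of
 * RTW89_PE_DURATION_16 for the 16 us case.
 */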
3187
3188 int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
3189 struct rtw89_vif_link *rtwvif_link,
3190 struct rtw89_sta_link *rtwsta_link)
3191 {
3192 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
3193 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
3194 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
3195 struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
3196 struct ieee80211_bss_conf *bss_conf;
3197 struct ieee80211_link_sta *link_sta;
3198 u8 pads[RTW89_PPE_BW_NUM];
3199 u32 len = sizeof(*h2c);
3200 struct sk_buff *skb;
3201 u16 lowest_rate;
3202 int ret;
3203
3204 memset(pads, 0, sizeof(pads));
3205
3206 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3207 if (!skb) {
3208 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n");
3209 return -ENOMEM;
3210 }
3211
3212 rcu_read_lock();
3213
3214 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
3215
3216 if (rtwsta_link) {
3217 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
3218
3219 if (link_sta->eht_cap.has_eht)
3220 __get_sta_eht_pkt_padding(rtwdev, link_sta, pads);
3221 else if (link_sta->he_cap.has_he)
3222 __get_sta_he_pkt_padding(rtwdev, link_sta, pads);
3223 }
3224
3225 if (vif->p2p)
3226 lowest_rate = RTW89_HW_RATE_OFDM6;
3227 else if (chan->band_type == RTW89_BAND_2G)
3228 lowest_rate = RTW89_HW_RATE_CCK1;
3229 else
3230 lowest_rate = RTW89_HW_RATE_OFDM6;
3231
3232 skb_put(skb, len);
3233 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
3234
3235 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) |
3236 le32_encode_bits(1, CCTLINFO_G7_C0_OP);
3237
3238 h2c->w0 = le32_encode_bits(1, CCTLINFO_G7_W0_DISRTSFB) |
3239 le32_encode_bits(1, CCTLINFO_G7_W0_DISDATAFB);
3240 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_DISRTSFB |
3241 CCTLINFO_G7_W0_DISDATAFB);
3242
3243 h2c->w1 = le32_encode_bits(lowest_rate, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
3244 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
3245
3246 h2c->w2 = le32_encode_bits(0, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL);
3247 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL);
3248
3249 h2c->w3 = le32_encode_bits(0, CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL);
3250 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL);
3251
3252 h2c->w4 = le32_encode_bits(rtwvif_link->port, CCTLINFO_G7_W4_MULTI_PORT_ID);
3253 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_MULTI_PORT_ID);
3254
3255 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) {
3256 h2c->w4 |= le32_encode_bits(0, CCTLINFO_G7_W4_DATA_DCM);
3257 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_DATA_DCM);
3258 }
3259
3260 if (bss_conf->eht_support) {
3261 u16 punct = bss_conf->chanreq.oper.punctured;
3262
3263 h2c->w4 |= le32_encode_bits(~punct,
3264 CCTLINFO_G7_W4_ACT_SUBCH_CBW);
3265 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW);
3266 }
3267
3268 h2c->w5 = le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_20],
3269 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) |
3270 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_40],
3271 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) |
3272 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_80],
3273 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) |
3274 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_160],
3275 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) |
3276 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_320],
3277 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
3278 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0 |
3279 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1 |
3280 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2 |
3281 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 |
3282 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
3283
3284 h2c->w6 = le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0,
3285 CCTLINFO_G7_W6_ULDL);
3286 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ULDL);
3287
3288 if (rtwsta_link) {
3289 h2c->w8 = le32_encode_bits(link_sta->he_cap.has_he,
3290 CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT);
3291 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT);
3292 }
3293
3294 rcu_read_unlock();
3295
3296 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3297 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3298 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
3299 len);
3300
3301 ret = rtw89_h2c_tx(rtwdev, skb, false);
3302 if (ret) {
3303 rtw89_err(rtwdev, "failed to send h2c\n");
3304 goto fail;
3305 }
3306
3307 return 0;
3308 fail:
3309 dev_kfree_skb_any(skb);
3310
3311 return ret;
3312 }
3313 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_g7);
3314
3315 int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev,
3316 struct rtw89_vif_link *rtwvif_link,
3317 struct rtw89_sta_link *rtwsta_link)
3318 {
3319 struct rtw89_sta *rtwsta = rtwsta_link->rtwsta;
3320 struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
3321 u32 len = sizeof(*h2c);
3322 struct sk_buff *skb;
3323 u16 agg_num = 0;
3324 u8 ba_bmap = 0;
3325 int ret;
3326 u8 tid;
3327
3328 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3329 if (!skb) {
3330 rtw89_err(rtwdev, "failed to alloc skb for ampdu cmac g7\n");
3331 return -ENOMEM;
3332 }
3333 skb_put(skb, len);
3334 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
3335
3336 for_each_set_bit(tid, rtwsta->ampdu_map, IEEE80211_NUM_TIDS) {
3337 if (agg_num == 0)
3338 agg_num = rtwsta->ampdu_params[tid].agg_num;
3339 else
3340 agg_num = min(agg_num, rtwsta->ampdu_params[tid].agg_num);
3341 }
3342
3343 if (agg_num <= 0x20)
3344 ba_bmap = 3;
3345 else if (agg_num > 0x20 && agg_num <= 0x40)
3346 ba_bmap = 0;
3347 else if (agg_num > 0x40 && agg_num <= 0x80)
3348 ba_bmap = 1;
3349 else if (agg_num > 0x80 && agg_num <= 0x100)
3350 ba_bmap = 2;
3351 else if (agg_num > 0x100 && agg_num <= 0x200)
3352 ba_bmap = 4;
3353 else if (agg_num > 0x200 && agg_num <= 0x400)
3354 ba_bmap = 5;
3355
3356 h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, CCTLINFO_G7_C0_MACID) |
3357 le32_encode_bits(1, CCTLINFO_G7_C0_OP);
3358
3359 h2c->w3 = le32_encode_bits(ba_bmap, CCTLINFO_G7_W3_BA_BMAP);
3360 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_BA_BMAP);
3361
3362 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3363 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3364 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 0,
3365 len);
3366
3367 ret = rtw89_h2c_tx(rtwdev, skb, false);
3368 if (ret) {
3369 rtw89_err(rtwdev, "failed to send h2c\n");
3370 goto fail;
3371 }
3372
3373 return 0;
3374 fail:
3375 dev_kfree_skb_any(skb);
3376
3377 return ret;
3378 }
3379 EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_g7);
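
/* BA_BMAP index written above, derived from the smallest agg_num
 * negotiated across the station's active TIDs (the firmware-side meaning
 * of each index is not restated here):
 *
 *	agg_num <= 0x20            -> 3
 *	0x20  < agg_num <= 0x40    -> 0
 *	0x40  < agg_num <= 0x80    -> 1
 *	0x80  < agg_num <= 0x100   -> 2
 *	0x100 < agg_num <= 0x200   -> 4
 *	0x200 < agg_num <= 0x400   -> 5
 */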
3380
3381 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
3382 struct rtw89_sta_link *rtwsta_link)
3383 {
3384 const struct rtw89_chip_info *chip = rtwdev->chip;
3385 struct sk_buff *skb;
3386 int ret;
3387
3388 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
3389 if (!skb) {
3390 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
3391 return -ENOMEM;
3392 }
3393 skb_put(skb, H2C_CMC_TBL_LEN);
3394 SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id);
3395 SET_CTRL_INFO_OPERATION(skb->data, 1);
3396 if (rtwsta_link->cctl_tx_time) {
3397 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1);
3398 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta_link->ampdu_max_time);
3399 }
3400 if (rtwsta_link->cctl_tx_retry_limit) {
3401 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1);
3402 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta_link->data_tx_cnt_lmt);
3403 }
3404
3405 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3406 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3407 chip->h2c_cctl_func_id, 0, 1,
3408 H2C_CMC_TBL_LEN);
3409
3410 ret = rtw89_h2c_tx(rtwdev, skb, false);
3411 if (ret) {
3412 rtw89_err(rtwdev, "failed to send h2c\n");
3413 goto fail;
3414 }
3415
3416 return 0;
3417 fail:
3418 dev_kfree_skb_any(skb);
3419
3420 return ret;
3421 }
3422
3423 int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
3424 struct rtw89_sta_link *rtwsta_link)
3425 {
3426 const struct rtw89_chip_info *chip = rtwdev->chip;
3427 struct sk_buff *skb;
3428 int ret;
3429
3430 if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD)
3431 return 0;
3432
3433 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
3434 if (!skb) {
3435 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
3436 return -ENOMEM;
3437 }
3438 skb_put(skb, H2C_CMC_TBL_LEN);
3439 SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id);
3440 SET_CTRL_INFO_OPERATION(skb->data, 1);
3441
3442 __rtw89_fw_h2c_set_tx_path(rtwdev, skb);
3443
3444 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3445 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3446 H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
3447 H2C_CMC_TBL_LEN);
3448
3449 ret = rtw89_h2c_tx(rtwdev, skb, false);
3450 if (ret) {
3451 rtw89_err(rtwdev, "failed to send h2c\n");
3452 goto fail;
3453 }
3454
3455 return 0;
3456 fail:
3457 dev_kfree_skb_any(skb);
3458
3459 return ret;
3460 }
3461
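/*
 * Download a fresh beacon to firmware: fetch the template (with TIM) from
 * mac80211, append P2P NoA attributes when present, then wrap the frame in a
 * bcn_upd header carrying port/band/rate and the group-IE (TIM) offset
 * relative to the start of the frame body.
 */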
3462 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
3463 struct rtw89_vif_link *rtwvif_link)
3464 {
3465 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
3466 rtwvif_link->chanctx_idx);
3467 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
3468 struct rtw89_h2c_bcn_upd *h2c;
3469 struct sk_buff *skb_beacon;
3470 struct ieee80211_hdr *hdr;
3471 u32 len = sizeof(*h2c);
3472 struct sk_buff *skb;
3473 int bcn_total_len;
3474 u16 beacon_rate;
3475 u16 tim_offset;
3476 void *noa_data;
3477 u8 noa_len;
3478 int ret;
3479
3480 if (vif->p2p)
3481 beacon_rate = RTW89_HW_RATE_OFDM6;
3482 else if (chan->band_type == RTW89_BAND_2G)
3483 beacon_rate = RTW89_HW_RATE_CCK1;
3484 else
3485 beacon_rate = RTW89_HW_RATE_OFDM6;
3486
3487 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
3488 NULL, 0);
3489 if (!skb_beacon) {
3490 rtw89_err(rtwdev, "failed to get beacon skb\n");
3491 return -ENOMEM;
3492 }
3493
3494 noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data);
3495 if (noa_len &&
3496 (noa_len <= skb_tailroom(skb_beacon) ||
3497 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
3498 skb_put_data(skb_beacon, noa_data, noa_len);
3499 }
3500
3501 hdr = (struct ieee80211_hdr *)skb_beacon->data;
3502 tim_offset -= ieee80211_hdrlen(hdr->frame_control);
3503
3504 bcn_total_len = len + skb_beacon->len;
3505 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
3506 if (!skb) {
3507 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
3508 dev_kfree_skb_any(skb_beacon);
3509 return -ENOMEM;
3510 }
3511 skb_put(skb, len);
3512 h2c = (struct rtw89_h2c_bcn_upd *)skb->data;
3513
3514 h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_W0_PORT) |
3515 le32_encode_bits(0, RTW89_H2C_BCN_UPD_W0_MBSSID) |
3516 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_W0_BAND) |
3517 le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST);
3518 h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_W1_MACID) |
3519 le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_W1_SSN_SEL) |
3520 le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_W1_SSN_MODE) |
3521 le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_W1_RATE);
3522
3523 skb_put_data(skb, skb_beacon->data, skb_beacon->len);
3524 dev_kfree_skb_any(skb_beacon);
3525
3526 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3527 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3528 H2C_FUNC_MAC_BCN_UPD, 0, 1,
3529 bcn_total_len);
3530
3531 ret = rtw89_h2c_tx(rtwdev, skb, false);
3532 if (ret) {
3533 rtw89_err(rtwdev, "failed to send h2c\n");
3534 dev_kfree_skb_any(skb);
3535 return ret;
3536 }
3537
3538 return 0;
3539 }
3540 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon);
3541
3542 int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev,
3543 struct rtw89_vif_link *rtwvif_link)
3544 {
3545 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
3546 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
3547 struct rtw89_h2c_bcn_upd_be *h2c;
3548 struct sk_buff *skb_beacon;
3549 struct ieee80211_hdr *hdr;
3550 u32 len = sizeof(*h2c);
3551 struct sk_buff *skb;
3552 int bcn_total_len;
3553 u16 beacon_rate;
3554 u16 tim_offset;
3555 void *noa_data;
3556 u8 noa_len;
3557 int ret;
3558
3559 if (vif->p2p)
3560 beacon_rate = RTW89_HW_RATE_OFDM6;
3561 else if (chan->band_type == RTW89_BAND_2G)
3562 beacon_rate = RTW89_HW_RATE_CCK1;
3563 else
3564 beacon_rate = RTW89_HW_RATE_OFDM6;
3565
3566 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
3567 NULL, 0);
3568 if (!skb_beacon) {
3569 rtw89_err(rtwdev, "failed to get beacon skb\n");
3570 return -ENOMEM;
3571 }
3572
3573 noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data);
3574 if (noa_len &&
3575 (noa_len <= skb_tailroom(skb_beacon) ||
3576 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
3577 skb_put_data(skb_beacon, noa_data, noa_len);
3578 }
3579
3580 hdr = (struct ieee80211_hdr *)skb_beacon->data;
3581 tim_offset -= ieee80211_hdrlen(hdr->frame_control);
3582
3583 bcn_total_len = len + skb_beacon->len;
3584 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
3585 if (!skb) {
3586 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
3587 dev_kfree_skb_any(skb_beacon);
3588 return -ENOMEM;
3589 }
3590 skb_put(skb, len);
3591 h2c = (struct rtw89_h2c_bcn_upd_be *)skb->data;
3592
3593 h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_BE_W0_PORT) |
3594 le32_encode_bits(0, RTW89_H2C_BCN_UPD_BE_W0_MBSSID) |
3595 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_BE_W0_BAND) |
3596 le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST);
3597 h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_BE_W1_MACID) |
3598 le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL) |
3599 le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE) |
3600 le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_BE_W1_RATE);
3601
3602 skb_put_data(skb, skb_beacon->data, skb_beacon->len);
3603 dev_kfree_skb_any(skb_beacon);
3604
3605 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3606 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3607 H2C_FUNC_MAC_BCN_UPD_BE, 0, 1,
3608 bcn_total_len);
3609
3610 ret = rtw89_h2c_tx(rtwdev, skb, false);
3611 if (ret) {
3612 rtw89_err(rtwdev, "failed to send h2c\n");
3613 goto fail;
3614 }
3615
3616 return 0;
3617
3618 fail:
3619 dev_kfree_skb_any(skb);
3620
3621 return ret;
3622 }
3623 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be);
3624
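/*
 * Report a role update for one MACID to firmware. In AP mode, per-STA
 * entries are reported as AP clients; otherwise the vif's own self_role is
 * used. upd_mode selects the kind of update (e.g. create/remove/change).
 */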
3625 #define H2C_ROLE_MAINTAIN_LEN 4
3626 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
3627 struct rtw89_vif_link *rtwvif_link,
3628 struct rtw89_sta_link *rtwsta_link,
3629 enum rtw89_upd_mode upd_mode)
3630 {
3631 struct sk_buff *skb;
3632 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
3633 u8 self_role;
3634 int ret;
3635
3636 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) {
3637 if (rtwsta_link)
3638 self_role = RTW89_SELF_ROLE_AP_CLIENT;
3639 else
3640 self_role = rtwvif_link->self_role;
3641 } else {
3642 self_role = rtwvif_link->self_role;
3643 }
3644
3645 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN);
3646 if (!skb) {
3647 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
3648 return -ENOMEM;
3649 }
3650 skb_put(skb, H2C_ROLE_MAINTAIN_LEN);
3651 SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id);
3652 SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role);
3653 SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode);
3654 SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif_link->wifi_role);
3655
3656 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3657 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
3658 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1,
3659 H2C_ROLE_MAINTAIN_LEN);
3660
3661 ret = rtw89_h2c_tx(rtwdev, skb, false);
3662 if (ret) {
3663 rtw89_err(rtwdev, "failed to send h2c\n");
3664 goto fail;
3665 }
3666
3667 return 0;
3668 fail:
3669 dev_kfree_skb_any(skb);
3670
3671 return ret;
3672 }
3673
3674 static enum rtw89_fw_sta_type
3675 rtw89_fw_get_sta_type(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
3676 struct rtw89_sta_link *rtwsta_link)
3677 {
3678 struct ieee80211_bss_conf *bss_conf;
3679 struct ieee80211_link_sta *link_sta;
3680 enum rtw89_fw_sta_type type;
3681
3682 rcu_read_lock();
3683
3684 if (!rtwsta_link)
3685 goto by_vif;
3686
3687 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
3688
3689 if (link_sta->eht_cap.has_eht)
3690 type = RTW89_FW_BE_STA;
3691 else if (link_sta->he_cap.has_he)
3692 type = RTW89_FW_AX_STA;
3693 else
3694 type = RTW89_FW_N_AC_STA;
3695
3696 goto out;
3697
3698 by_vif:
3699 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
3700
3701 if (bss_conf->eht_support)
3702 type = RTW89_FW_BE_STA;
3703 else if (bss_conf->he_support)
3704 type = RTW89_FW_AX_STA;
3705 else
3706 type = RTW89_FW_N_AC_STA;
3707
3708 out:
3709 rcu_read_unlock();
3710
3711 return type;
3712 }
3713
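/*
 * Send JOININFO for a link. BE-generation chips use the longer v1 layout,
 * which carries the firmware STA type (N/AC, AX or BE) derived from the
 * peer's (or, lacking one, the vif's) HE/EHT capabilities. dis_conn marks a
 * disconnect; for AP-mode clients it also downgrades net_type to NO_LINK.
 */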
3714 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
3715 struct rtw89_sta_link *rtwsta_link, bool dis_conn)
3716 {
3717 struct sk_buff *skb;
3718 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
3719 u8 self_role = rtwvif_link->self_role;
3720 enum rtw89_fw_sta_type sta_type;
3721 u8 net_type = rtwvif_link->net_type;
3722 struct rtw89_h2c_join_v1 *h2c_v1;
3723 struct rtw89_h2c_join *h2c;
3724 u32 len = sizeof(*h2c);
3725 bool format_v1 = false;
3726 int ret;
3727
3728 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
3729 len = sizeof(*h2c_v1);
3730 format_v1 = true;
3731 }
3732
3733 if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta_link) {
3734 self_role = RTW89_SELF_ROLE_AP_CLIENT;
3735 net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type;
3736 }
3737
3738 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3739 if (!skb) {
3740 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
3741 return -ENOMEM;
3742 }
3743 skb_put(skb, len);
3744 h2c = (struct rtw89_h2c_join *)skb->data;
3745
3746 h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_JOININFO_W0_MACID) |
3747 le32_encode_bits(dis_conn, RTW89_H2C_JOININFO_W0_OP) |
3748 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_JOININFO_W0_BAND) |
3749 le32_encode_bits(rtwvif_link->wmm, RTW89_H2C_JOININFO_W0_WMM) |
3750 le32_encode_bits(rtwvif_link->trigger, RTW89_H2C_JOININFO_W0_TGR) |
3751 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_ISHESTA) |
3752 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DLBW) |
3753 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_TF_MAC_PAD) |
3754 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DL_T_PE) |
3755 le32_encode_bits(rtwvif_link->port, RTW89_H2C_JOININFO_W0_PORT_ID) |
3756 le32_encode_bits(net_type, RTW89_H2C_JOININFO_W0_NET_TYPE) |
3757 le32_encode_bits(rtwvif_link->wifi_role,
3758 RTW89_H2C_JOININFO_W0_WIFI_ROLE) |
3759 le32_encode_bits(self_role, RTW89_H2C_JOININFO_W0_SELF_ROLE);
3760
3761 if (!format_v1)
3762 goto done;
3763
3764 h2c_v1 = (struct rtw89_h2c_join_v1 *)skb->data;
3765
3766 sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif_link, rtwsta_link);
3767
3768 h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE);
3769 h2c_v1->w2 = 0;
3770
3771 done:
3772 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3773 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
3774 H2C_FUNC_MAC_JOININFO, 0, 1,
3775 len);
3776
3777 ret = rtw89_h2c_tx(rtwdev, skb, false);
3778 if (ret) {
3779 rtw89_err(rtwdev, "failed to send h2c\n");
3780 goto fail;
3781 }
3782
3783 return 0;
3784 fail:
3785 dev_kfree_skb_any(skb);
3786
3787 return ret;
3788 }
3789
3790 int rtw89_fw_h2c_notify_dbcc(struct rtw89_dev *rtwdev, bool en)
3791 {
3792 struct rtw89_h2c_notify_dbcc *h2c;
3793 u32 len = sizeof(*h2c);
3794 struct sk_buff *skb;
3795 int ret;
3796
3797 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3798 if (!skb) {
3799 rtw89_err(rtwdev, "failed to alloc skb for h2c notify dbcc\n");
3800 return -ENOMEM;
3801 }
3802 skb_put(skb, len);
3803 h2c = (struct rtw89_h2c_notify_dbcc *)skb->data;
3804
3805 h2c->w0 = le32_encode_bits(en, RTW89_H2C_NOTIFY_DBCC_EN);
3806
3807 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3808 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
3809 H2C_FUNC_NOTIFY_DBCC, 0, 1,
3810 len);
3811
3812 ret = rtw89_h2c_tx(rtwdev, skb, false);
3813 if (ret) {
3814 rtw89_err(rtwdev, "failed to send h2c\n");
3815 goto fail;
3816 }
3817
3818 return 0;
3819 fail:
3820 dev_kfree_skb_any(skb);
3821
3822 return ret;
3823 }
3824
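/*
 * Pause (or resume) a MACID. "grp" selects a 32-bit group word and "sh" the
 * bit within it. When firmware supports MACID_PAUSE_SLEEP, the extended
 * layout is used so the sleep bitmap is updated alongside the pause bitmap.
 *
 * Illustrative caller-side sketch (not taken from this file): for macid n,
 *
 *	rtw89_fw_h2c_macid_pause(rtwdev, n % 32, n / 32, true);
 */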
3825 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
3826 bool pause)
3827 {
3828 struct rtw89_fw_macid_pause_sleep_grp *h2c_new;
3829 struct rtw89_fw_macid_pause_grp *h2c;
3830 __le32 set = cpu_to_le32(BIT(sh));
3831 u8 h2c_macid_pause_id;
3832 struct sk_buff *skb;
3833 u32 len;
3834 int ret;
3835
3836 if (RTW89_CHK_FW_FEATURE(MACID_PAUSE_SLEEP, &rtwdev->fw)) {
3837 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE_SLEEP;
3838 len = sizeof(*h2c_new);
3839 } else {
3840 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE;
3841 len = sizeof(*h2c);
3842 }
3843
3844 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3845 if (!skb) {
3846 rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n");
3847 return -ENOMEM;
3848 }
3849 skb_put(skb, len);
3850
3851 if (h2c_macid_pause_id == H2C_FUNC_MAC_MACID_PAUSE_SLEEP) {
3852 h2c_new = (struct rtw89_fw_macid_pause_sleep_grp *)skb->data;
3853
3854 h2c_new->n[0].pause_mask_grp[grp] = set;
3855 h2c_new->n[0].sleep_mask_grp[grp] = set;
3856 if (pause) {
3857 h2c_new->n[0].pause_grp[grp] = set;
3858 h2c_new->n[0].sleep_grp[grp] = set;
3859 }
3860 } else {
3861 h2c = (struct rtw89_fw_macid_pause_grp *)skb->data;
3862
3863 h2c->mask_grp[grp] = set;
3864 if (pause)
3865 h2c->pause_grp[grp] = set;
3866 }
3867
3868 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3869 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
3870 h2c_macid_pause_id, 1, 0,
3871 len);
3872
3873 ret = rtw89_h2c_tx(rtwdev, skb, false);
3874 if (ret) {
3875 rtw89_err(rtwdev, "failed to send h2c\n");
3876 goto fail;
3877 }
3878
3879 return 0;
3880 fail:
3881 dev_kfree_skb_any(skb);
3882
3883 return ret;
3884 }
3885
3886 #define H2C_EDCA_LEN 12
3887 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
3888 u8 ac, u32 val)
3889 {
3890 struct sk_buff *skb;
3891 int ret;
3892
3893 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN);
3894 if (!skb) {
3895 rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n");
3896 return -ENOMEM;
3897 }
3898 skb_put(skb, H2C_EDCA_LEN);
3899 RTW89_SET_EDCA_SEL(skb->data, 0);
3900 RTW89_SET_EDCA_BAND(skb->data, rtwvif_link->mac_idx);
3901 RTW89_SET_EDCA_WMM(skb->data, 0);
3902 RTW89_SET_EDCA_AC(skb->data, ac);
3903 RTW89_SET_EDCA_PARAM(skb->data, val);
3904
3905 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3906 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
3907 H2C_FUNC_USR_EDCA, 0, 1,
3908 H2C_EDCA_LEN);
3909
3910 ret = rtw89_h2c_tx(rtwdev, skb, false);
3911 if (ret) {
3912 rtw89_err(rtwdev, "failed to send h2c\n");
3913 goto fail;
3914 }
3915
3916 return 0;
3917 fail:
3918 dev_kfree_skb_any(skb);
3919
3920 return ret;
3921 }
3922
3923 #define H2C_TSF32_TOGL_LEN 4
3924 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev,
3925 struct rtw89_vif_link *rtwvif_link,
3926 bool en)
3927 {
3928 struct sk_buff *skb;
3929 u16 early_us = en ? 2000 : 0;
3930 u8 *cmd;
3931 int ret;
3932
3933 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN);
3934 if (!skb) {
3935 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
3936 return -ENOMEM;
3937 }
3938 skb_put(skb, H2C_TSF32_TOGL_LEN);
3939 cmd = skb->data;
3940
3941 RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif_link->mac_idx);
3942 RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en);
3943 RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif_link->port);
3944 RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us);
3945
3946 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3947 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
3948 H2C_FUNC_TSF32_TOGL, 0, 0,
3949 H2C_TSF32_TOGL_LEN);
3950
3951 ret = rtw89_h2c_tx(rtwdev, skb, false);
3952 if (ret) {
3953 rtw89_err(rtwdev, "failed to send h2c\n");
3954 goto fail;
3955 }
3956
3957 return 0;
3958 fail:
3959 dev_kfree_skb_any(skb);
3960
3961 return ret;
3962 }
3963
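/*
 * The offload config H2C below carries a fixed, opaque 8-byte blob; the
 * values are a convention agreed with firmware rather than derived from
 * driver state.
 */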
3964 #define H2C_OFLD_CFG_LEN 8
3965 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
3966 {
3967 static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00};
3968 struct sk_buff *skb;
3969 int ret;
3970
3971 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN);
3972 if (!skb) {
3973 rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n");
3974 return -ENOMEM;
3975 }
3976 skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN);
3977
3978 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3979 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
3980 H2C_FUNC_OFLD_CFG, 0, 1,
3981 H2C_OFLD_CFG_LEN);
3982
3983 ret = rtw89_h2c_tx(rtwdev, skb, false);
3984 if (ret) {
3985 rtw89_err(rtwdev, "failed to send h2c\n");
3986 goto fail;
3987 }
3988
3989 return 0;
3990 fail:
3991 dev_kfree_skb_any(skb);
3992
3993 return ret;
3994 }
3995
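/*
 * Thermal-protection TX duty cycling. Level 0 (or an out-of-range level)
 * stops duty cycling; otherwise the active interval is 100 - lv * STEP
 * percent and the remainder is spent paused. For example, if the step were
 * 10%, lv == 2 would give 80% active / 20% paused.
 */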
3996 int rtw89_fw_h2c_tx_duty(struct rtw89_dev *rtwdev, u8 lv)
3997 {
3998 struct rtw89_h2c_tx_duty *h2c;
3999 u32 len = sizeof(*h2c);
4000 struct sk_buff *skb;
4001 u16 pause, active;
4002 int ret;
4003
4004 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4005 if (!skb) {
4006 rtw89_err(rtwdev, "failed to alloc skb for h2c tx duty\n");
4007 return -ENOMEM;
4008 }
4009
4010 skb_put(skb, len);
4011 h2c = (struct rtw89_h2c_tx_duty *)skb->data;
4012
4013 static_assert(RTW89_THERMAL_PROT_LV_MAX * RTW89_THERMAL_PROT_STEP < 100);
4014
4015 if (lv == 0 || lv > RTW89_THERMAL_PROT_LV_MAX) {
4016 h2c->w1 = le32_encode_bits(1, RTW89_H2C_TX_DUTY_W1_STOP);
4017 } else {
4018 active = 100 - lv * RTW89_THERMAL_PROT_STEP;
4019 pause = 100 - active;
4020
4021 h2c->w0 = le32_encode_bits(pause, RTW89_H2C_TX_DUTY_W0_PAUSE_INTVL_MASK) |
4022 le32_encode_bits(active, RTW89_H2C_TX_DUTY_W0_TX_INTVL_MASK);
4023 }
4024
4025 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4026 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4027 H2C_FUNC_TX_DUTY, 0, 0, len);
4028
4029 ret = rtw89_h2c_tx(rtwdev, skb, false);
4030 if (ret) {
4031 rtw89_err(rtwdev, "failed to send h2c\n");
4032 goto fail;
4033 }
4034
4035 return 0;
4036 fail:
4037 dev_kfree_skb_any(skb);
4038
4039 return ret;
4040 }
4041
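/*
 * Configure firmware beacon filtering / connection-quality monitoring for a
 * station link. CQM hysteresis and threshold come from mac80211 bss_conf
 * when set, falling back to driver defaults; the threshold is rebased by
 * MAX_RSSI so firmware receives a non-negative value.
 */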
4042 int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
4043 struct rtw89_vif_link *rtwvif_link,
4044 bool connect)
4045 {
4046 struct ieee80211_bss_conf *bss_conf;
4047 s32 thold = RTW89_DEFAULT_CQM_THOLD;
4048 u32 hyst = RTW89_DEFAULT_CQM_HYST;
4049 struct rtw89_h2c_bcnfltr *h2c;
4050 u32 len = sizeof(*h2c);
4051 struct sk_buff *skb;
4052 int ret;
4053
4054 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
4055 return -EINVAL;
4056
4057 if (!rtwvif_link || rtwvif_link->net_type != RTW89_NET_TYPE_INFRA)
4058 return -EINVAL;
4059
4060 rcu_read_lock();
4061
4062 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false);
4063
4064 if (bss_conf->cqm_rssi_hyst)
4065 hyst = bss_conf->cqm_rssi_hyst;
4066 if (bss_conf->cqm_rssi_thold)
4067 thold = bss_conf->cqm_rssi_thold;
4068
4069 rcu_read_unlock();
4070
4071 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4072 if (!skb) {
4073 rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n");
4074 return -ENOMEM;
4075 }
4076
4077 skb_put(skb, len);
4078 h2c = (struct rtw89_h2c_bcnfltr *)skb->data;
4079
4080 h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) |
4081 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) |
4082 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) |
4083 le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT,
4084 RTW89_H2C_BCNFLTR_W0_MODE) |
4085 le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) |
4086 le32_encode_bits(hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) |
4087 le32_encode_bits(thold + MAX_RSSI,
4088 RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) |
4089 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID);
4090
4091 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4092 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4093 H2C_FUNC_CFG_BCNFLTR, 0, 1, len);
4094
4095 ret = rtw89_h2c_tx(rtwdev, skb, false);
4096 if (ret) {
4097 rtw89_err(rtwdev, "failed to send h2c\n");
4098 goto fail;
4099 }
4100
4101 return 0;
4102 fail:
4103 dev_kfree_skb_any(skb);
4104
4105 return ret;
4106 }
4107
4108 int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev,
4109 struct rtw89_rx_phy_ppdu *phy_ppdu)
4110 {
4111 struct rtw89_h2c_ofld_rssi *h2c;
4112 u32 len = sizeof(*h2c);
4113 struct sk_buff *skb;
4114 s8 rssi;
4115 int ret;
4116
4117 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
4118 return -EINVAL;
4119
4120 if (!phy_ppdu)
4121 return -EINVAL;
4122
4123 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4124 if (!skb) {
4125 rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n");
4126 return -ENOMEM;
4127 }
4128
4129 rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR;
4130 skb_put(skb, len);
4131 h2c = (struct rtw89_h2c_ofld_rssi *)skb->data;
4132
4133 h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) |
4134 le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM);
4135 h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL);
4136
4137 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4138 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4139 H2C_FUNC_OFLD_RSSI, 0, 1, len);
4140
4141 ret = rtw89_h2c_tx(rtwdev, skb, false);
4142 if (ret) {
4143 rtw89_err(rtwdev, "failed to send h2c\n");
4144 goto fail;
4145 }
4146
4147 return 0;
4148 fail:
4149 dev_kfree_skb_any(skb);
4150
4151 return ret;
4152 }
4153
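/*
 * Report the vif's recent TX/RX throughput to firmware; only meaningful for
 * station (infrastructure) links, hence the net_type check.
 */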
4154 int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
4155 {
4156 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
4157 struct rtw89_traffic_stats *stats = &rtwvif->stats;
4158 struct rtw89_h2c_ofld *h2c;
4159 u32 len = sizeof(*h2c);
4160 struct sk_buff *skb;
4161 int ret;
4162
4163 if (rtwvif_link->net_type != RTW89_NET_TYPE_INFRA)
4164 return -EINVAL;
4165
4166 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4167 if (!skb) {
4168 rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n");
4169 return -ENOMEM;
4170 }
4171
4172 skb_put(skb, len);
4173 h2c = (struct rtw89_h2c_ofld *)skb->data;
4174
4175 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) |
4176 le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) |
4177 le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP);
4178
4179 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4180 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4181 H2C_FUNC_OFLD_TP, 0, 1, len);
4182
4183 ret = rtw89_h2c_tx(rtwdev, skb, false);
4184 if (ret) {
4185 rtw89_err(rtwdev, "failed to send h2c\n");
4186 goto fail;
4187 }
4188
4189 return 0;
4190 fail:
4191 dev_kfree_skb_any(skb);
4192
4193 return ret;
4194 }
4195
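/*
 * Push a rate-adaptation configuration for one MACID. BE-generation chips
 * use the longer v1 layout, which adds EHT mode/bandwidth fields; when "csi"
 * is set, the fixed-CSI-rate fields in w2/w3 are filled in as well.
 */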
4196 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi)
4197 {
4198 const struct rtw89_chip_info *chip = rtwdev->chip;
4199 struct rtw89_h2c_ra_v1 *h2c_v1;
4200 struct rtw89_h2c_ra *h2c;
4201 u32 len = sizeof(*h2c);
4202 bool format_v1 = false;
4203 struct sk_buff *skb;
4204 int ret;
4205
4206 if (chip->chip_gen == RTW89_CHIP_BE) {
4207 len = sizeof(*h2c_v1);
4208 format_v1 = true;
4209 }
4210
4211 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4212 if (!skb) {
4213 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
4214 return -ENOMEM;
4215 }
4216 skb_put(skb, len);
4217 h2c = (struct rtw89_h2c_ra *)skb->data;
4218 rtw89_debug(rtwdev, RTW89_DBG_RA,
4219 "ra cmd msk: %llx ", ra->ra_mask);
4220
4221 h2c->w0 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_W0_MODE) |
4222 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_W0_BW_CAP) |
4223 le32_encode_bits(ra->macid, RTW89_H2C_RA_W0_MACID) |
4224 le32_encode_bits(ra->dcm_cap, RTW89_H2C_RA_W0_DCM) |
4225 le32_encode_bits(ra->er_cap, RTW89_H2C_RA_W0_ER) |
4226 le32_encode_bits(ra->init_rate_lv, RTW89_H2C_RA_W0_INIT_RATE_LV) |
4227 le32_encode_bits(ra->upd_all, RTW89_H2C_RA_W0_UPD_ALL) |
4228 le32_encode_bits(ra->en_sgi, RTW89_H2C_RA_W0_SGI) |
4229 le32_encode_bits(ra->ldpc_cap, RTW89_H2C_RA_W0_LDPC) |
4230 le32_encode_bits(ra->stbc_cap, RTW89_H2C_RA_W0_STBC) |
4231 le32_encode_bits(ra->ss_num, RTW89_H2C_RA_W0_SS_NUM) |
4232 le32_encode_bits(ra->giltf, RTW89_H2C_RA_W0_GILTF) |
4233 le32_encode_bits(ra->upd_bw_nss_mask, RTW89_H2C_RA_W0_UPD_BW_NSS_MASK) |
4234 le32_encode_bits(ra->upd_mask, RTW89_H2C_RA_W0_UPD_MASK);
4235 h2c->w1 = le32_encode_bits(ra->ra_mask, RTW89_H2C_RA_W1_RAMASK_LO32);
4236 h2c->w2 = le32_encode_bits(ra->ra_mask >> 32, RTW89_H2C_RA_W2_RAMASK_HI32);
4237 h2c->w3 = le32_encode_bits(ra->fix_giltf_en, RTW89_H2C_RA_W3_FIX_GILTF_EN) |
4238 le32_encode_bits(ra->fix_giltf, RTW89_H2C_RA_W3_FIX_GILTF);
4239
4240 if (!format_v1)
4241 goto csi;
4242
4243 h2c_v1 = (struct rtw89_h2c_ra_v1 *)h2c;
4244 h2c_v1->w4 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_V1_W4_MODE_EHT) |
4245 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_V1_W4_BW_EHT);
4246
4247 csi:
4248 if (!csi)
4249 goto done;
4250
4251 h2c->w2 |= le32_encode_bits(1, RTW89_H2C_RA_W2_BFEE_CSI_CTL);
4252 h2c->w3 |= le32_encode_bits(ra->band_num, RTW89_H2C_RA_W3_BAND_NUM) |
4253 le32_encode_bits(ra->cr_tbl_sel, RTW89_H2C_RA_W3_CR_TBL_SEL) |
4254 le32_encode_bits(ra->fixed_csi_rate_en, RTW89_H2C_RA_W3_FIXED_CSI_RATE_EN) |
4255 le32_encode_bits(ra->ra_csi_rate_en, RTW89_H2C_RA_W3_RA_CSI_RATE_EN) |
4256 le32_encode_bits(ra->csi_mcs_ss_idx, RTW89_H2C_RA_W3_FIXED_CSI_MCS_SS_IDX) |
4257 le32_encode_bits(ra->csi_mode, RTW89_H2C_RA_W3_FIXED_CSI_MODE) |
4258 le32_encode_bits(ra->csi_gi_ltf, RTW89_H2C_RA_W3_FIXED_CSI_GI_LTF) |
4259 le32_encode_bits(ra->csi_bw, RTW89_H2C_RA_W3_FIXED_CSI_BW);
4260
4261 done:
4262 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4263 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA,
4264 H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0,
4265 len);
4266
4267 ret = rtw89_h2c_tx(rtwdev, skb, false);
4268 if (ret) {
4269 rtw89_err(rtwdev, "failed to send h2c\n");
4270 goto fail;
4271 }
4272
4273 return 0;
4274 fail:
4275 dev_kfree_skb_any(skb);
4276
4277 return ret;
4278 }
4279
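/*
 * BT-coex driver info, "init" flavor: antenna topology, module/RFE data and
 * WL/BT bring-up state are packed into a cxinit record and delivered via the
 * common SET_DRV_INFO outsrc command used by all cxdrv_* helpers below.
 */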
4280 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev, u8 type)
4281 {
4282 struct rtw89_btc *btc = &rtwdev->btc;
4283 struct rtw89_btc_dm *dm = &btc->dm;
4284 struct rtw89_btc_init_info *init_info = &dm->init_info.init;
4285 struct rtw89_btc_module *module = &init_info->module;
4286 struct rtw89_btc_ant_info *ant = &module->ant;
4287 struct rtw89_h2c_cxinit *h2c;
4288 u32 len = sizeof(*h2c);
4289 struct sk_buff *skb;
4290 int ret;
4291
4292 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4293 if (!skb) {
4294 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n");
4295 return -ENOMEM;
4296 }
4297 skb_put(skb, len);
4298 h2c = (struct rtw89_h2c_cxinit *)skb->data;
4299
4300 h2c->hdr.type = type;
4301 h2c->hdr.len = len - H2C_LEN_CXDRVHDR;
4302
4303 h2c->ant_type = ant->type;
4304 h2c->ant_num = ant->num;
4305 h2c->ant_iso = ant->isolation;
4306 h2c->ant_info =
4307 u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) |
4308 u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) |
4309 u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) |
4310 u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT);
4311
4312 h2c->mod_rfe = module->rfe_type;
4313 h2c->mod_cv = module->cv;
4314 h2c->mod_info =
4315 u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) |
4316 u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) |
4317 u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) |
4318 u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE);
4319 h2c->mod_adie_kt = module->kt_ver_adie;
4320 h2c->wl_gch = init_info->wl_guard_ch;
4321
4322 h2c->info =
4323 u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) |
4324 u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) |
4325 u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) |
4326 u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) |
4327 u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY);
4328
4329 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4330 H2C_CAT_OUTSRC, BTFC_SET,
4331 SET_DRV_INFO, 0, 0,
4332 len);
4333
4334 ret = rtw89_h2c_tx(rtwdev, skb, false);
4335 if (ret) {
4336 rtw89_err(rtwdev, "failed to send h2c\n");
4337 goto fail;
4338 }
4339
4340 return 0;
4341 fail:
4342 dev_kfree_skb_any(skb);
4343
4344 return ret;
4345 }
4346
4347 int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type)
4348 {
4349 struct rtw89_btc *btc = &rtwdev->btc;
4350 struct rtw89_btc_dm *dm = &btc->dm;
4351 struct rtw89_btc_init_info_v7 *init_info = &dm->init_info.init_v7;
4352 struct rtw89_h2c_cxinit_v7 *h2c;
4353 u32 len = sizeof(*h2c);
4354 struct sk_buff *skb;
4355 int ret;
4356
4357 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4358 if (!skb) {
4359 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init_v7\n");
4360 return -ENOMEM;
4361 }
4362 skb_put(skb, len);
4363 h2c = (struct rtw89_h2c_cxinit_v7 *)skb->data;
4364
4365 h2c->hdr.type = type;
4366 h2c->hdr.ver = btc->ver->fcxinit;
4367 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
4368 h2c->init = *init_info;
4369
4370 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4371 H2C_CAT_OUTSRC, BTFC_SET,
4372 SET_DRV_INFO, 0, 0,
4373 len);
4374
4375 ret = rtw89_h2c_tx(rtwdev, skb, false);
4376 if (ret) {
4377 rtw89_err(rtwdev, "failed to send h2c\n");
4378 goto fail;
4379 }
4380
4381 return 0;
4382 fail:
4383 dev_kfree_skb_any(skb);
4384
4385 return ret;
4386 }
4387
4388 #define PORT_DATA_OFFSET 4
4389 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12
4390 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \
4391 (4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR)
4392
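/*
 * Legacy (v0) WL role report for BT-coex: a role bitmap plus one fixed-size
 * record per port describing the active role's band/channel/bandwidth and
 * traffic levels. Later variants below change the per-port record layout and
 * append DBCC multi-role info at the tail.
 */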
4393 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type)
4394 {
4395 struct rtw89_btc *btc = &rtwdev->btc;
4396 const struct rtw89_btc_ver *ver = btc->ver;
4397 struct rtw89_btc_wl_info *wl = &btc->cx.wl;
4398 struct rtw89_btc_wl_role_info *role_info = &wl->role_info;
4399 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
4400 struct rtw89_btc_wl_active_role *active = role_info->active_role;
4401 struct sk_buff *skb;
4402 u32 len;
4403 u8 offset = 0;
4404 u8 *cmd;
4405 int ret;
4406 int i;
4407
4408 len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num);
4409
4410 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4411 if (!skb) {
4412 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
4413 return -ENOMEM;
4414 }
4415 skb_put(skb, len);
4416 cmd = skb->data;
4417
4418 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
4419 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
4420
4421 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
4422 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
4423
4424 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
4425 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
4426 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
4427 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
4428 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
4429 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
4430 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
4431 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
4432 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
4433 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
4434 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
4435 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
4436
4437 for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
4438 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
4439 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
4440 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
4441 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
4442 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
4443 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
4444 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
4445 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
4446 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
4447 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
4448 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
4449 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
4450 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
4451 }
4452
4453 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4454 H2C_CAT_OUTSRC, BTFC_SET,
4455 SET_DRV_INFO, 0, 0,
4456 len);
4457
4458 ret = rtw89_h2c_tx(rtwdev, skb, false);
4459 if (ret) {
4460 rtw89_err(rtwdev, "failed to send h2c\n");
4461 goto fail;
4462 }
4463
4464 return 0;
4465 fail:
4466 dev_kfree_skb_any(skb);
4467
4468 return ret;
4469 }
4470
4471 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \
4472 (4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)
4473
4474 int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type)
4475 {
4476 struct rtw89_btc *btc = &rtwdev->btc;
4477 const struct rtw89_btc_ver *ver = btc->ver;
4478 struct rtw89_btc_wl_info *wl = &btc->cx.wl;
4479 struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1;
4480 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
4481 struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1;
4482 struct sk_buff *skb;
4483 u32 len;
4484 u8 *cmd, offset;
4485 int ret;
4486 int i;
4487
4488 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num);
4489
4490 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4491 if (!skb) {
4492 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
4493 return -ENOMEM;
4494 }
4495 skb_put(skb, len);
4496 cmd = skb->data;
4497
4498 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
4499 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
4500
4501 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
4502 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
4503
4504 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
4505 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
4506 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
4507 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
4508 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
4509 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
4510 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
4511 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
4512 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
4513 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
4514 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
4515 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
4516
4517 offset = PORT_DATA_OFFSET;
4518 for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
4519 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
4520 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
4521 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
4522 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
4523 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
4524 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
4525 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
4526 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
4527 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
4528 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
4529 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
4530 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
4531 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
4532 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset);
4533 }
4534
4535 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
4536 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset);
4537 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset);
4538 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset);
4539 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset);
4540 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset);
4541 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset);
4542
4543 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4544 H2C_CAT_OUTSRC, BTFC_SET,
4545 SET_DRV_INFO, 0, 0,
4546 len);
4547
4548 ret = rtw89_h2c_tx(rtwdev, skb, false);
4549 if (ret) {
4550 rtw89_err(rtwdev, "failed to send h2c\n");
4551 goto fail;
4552 }
4553
4554 return 0;
4555 fail:
4556 dev_kfree_skb_any(skb);
4557
4558 return ret;
4559 }
4560
4561 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \
4562 (4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)
4563
4564 int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type)
4565 {
4566 struct rtw89_btc *btc = &rtwdev->btc;
4567 const struct rtw89_btc_ver *ver = btc->ver;
4568 struct rtw89_btc_wl_info *wl = &btc->cx.wl;
4569 struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2;
4570 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
4571 struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2;
4572 struct sk_buff *skb;
4573 u32 len;
4574 u8 *cmd, offset;
4575 int ret;
4576 int i;
4577
4578 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num);
4579
4580 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4581 if (!skb) {
4582 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
4583 return -ENOMEM;
4584 }
4585 skb_put(skb, len);
4586 cmd = skb->data;
4587
4588 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
4589 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
4590
4591 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
4592 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
4593
4594 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
4595 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
4596 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
4597 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
4598 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
4599 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
4600 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
4601 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
4602 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
4603 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
4604 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
4605 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
4606
4607 offset = PORT_DATA_OFFSET;
4608 for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
4609 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset);
4610 RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset);
4611 RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset);
4612 RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset);
4613 RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset);
4614 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset);
4615 RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset);
4616 RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset);
4617 RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset);
4618 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset);
4619 }
4620
4621 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
4622 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset);
4623 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset);
4624 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset);
4625 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset);
4626 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset);
4627 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset);
4628
4629 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4630 H2C_CAT_OUTSRC, BTFC_SET,
4631 SET_DRV_INFO, 0, 0,
4632 len);
4633
4634 ret = rtw89_h2c_tx(rtwdev, skb, false);
4635 if (ret) {
4636 rtw89_err(rtwdev, "failed to send h2c\n");
4637 goto fail;
4638 }
4639
4640 return 0;
4641 fail:
4642 dev_kfree_skb_any(skb);
4643
4644 return ret;
4645 }
4646
4647 int rtw89_fw_h2c_cxdrv_role_v7(struct rtw89_dev *rtwdev, u8 type)
4648 {
4649 struct rtw89_btc *btc = &rtwdev->btc;
4650 struct rtw89_btc_wl_role_info_v7 *role = &btc->cx.wl.role_info_v7;
4651 struct rtw89_h2c_cxrole_v7 *h2c;
4652 u32 len = sizeof(*h2c);
4653 struct sk_buff *skb;
4654 int ret;
4655
4656 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4657 if (!skb) {
4658 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
4659 return -ENOMEM;
4660 }
4661 skb_put(skb, len);
4662 h2c = (struct rtw89_h2c_cxrole_v7 *)skb->data;
4663
4664 h2c->hdr.type = type;
4665 h2c->hdr.ver = btc->ver->fwlrole;
4666 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
4667 memcpy(&h2c->_u8, role, sizeof(h2c->_u8));
4668 h2c->_u32.role_map = cpu_to_le32(role->role_map);
4669 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type);
4670 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration);
4671 h2c->_u32.dbcc_en = cpu_to_le32(role->dbcc_en);
4672 h2c->_u32.dbcc_chg = cpu_to_le32(role->dbcc_chg);
4673 h2c->_u32.dbcc_2g_phy = cpu_to_le32(role->dbcc_2g_phy);
4674
4675 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4676 H2C_CAT_OUTSRC, BTFC_SET,
4677 SET_DRV_INFO, 0, 0,
4678 len);
4679
4680 ret = rtw89_h2c_tx(rtwdev, skb, false);
4681 if (ret) {
4682 rtw89_err(rtwdev, "failed to send h2c\n");
4683 goto fail;
4684 }
4685
4686 return 0;
4687 fail:
4688 dev_kfree_skb_any(skb);
4689
4690 return ret;
4691 }
4692
4693 int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type)
4694 {
4695 struct rtw89_btc *btc = &rtwdev->btc;
4696 struct rtw89_btc_wl_role_info_v8 *role = &btc->cx.wl.role_info_v8;
4697 struct rtw89_h2c_cxrole_v8 *h2c;
4698 u32 len = sizeof(*h2c);
4699 struct sk_buff *skb;
4700 int ret;
4701
4702 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4703 if (!skb) {
4704 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
4705 return -ENOMEM;
4706 }
4707 skb_put(skb, len);
4708 h2c = (struct rtw89_h2c_cxrole_v8 *)skb->data;
4709
4710 h2c->hdr.type = type;
4711 h2c->hdr.ver = btc->ver->fwlrole;
4712 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
4713 memcpy(&h2c->_u8, role, sizeof(h2c->_u8));
4714 h2c->_u32.role_map = cpu_to_le32(role->role_map);
4715 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type);
4716 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration);
4717
4718 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4719 H2C_CAT_OUTSRC, BTFC_SET,
4720 SET_DRV_INFO, 0, 0,
4721 len);
4722
4723 ret = rtw89_h2c_tx(rtwdev, skb, false);
4724 if (ret) {
4725 rtw89_err(rtwdev, "failed to send h2c\n");
4726 goto fail;
4727 }
4728
4729 return 0;
4730 fail:
4731 dev_kfree_skb_any(skb);
4732
4733 return ret;
4734 }
4735
4736 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
4737 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type)
4738 {
4739 struct rtw89_btc *btc = &rtwdev->btc;
4740 const struct rtw89_btc_ver *ver = btc->ver;
4741 struct rtw89_btc_ctrl *ctrl = &btc->ctrl.ctrl;
4742 struct sk_buff *skb;
4743 u8 *cmd;
4744 int ret;
4745
4746 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL);
4747 if (!skb) {
4748 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
4749 return -ENOMEM;
4750 }
4751 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL);
4752 cmd = skb->data;
4753
4754 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
4755 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR);
4756
4757 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual);
4758 RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt);
4759 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun);
4760 if (ver->fcxctrl == 0)
4761 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step);
4762
4763 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4764 H2C_CAT_OUTSRC, BTFC_SET,
4765 SET_DRV_INFO, 0, 0,
4766 H2C_LEN_CXDRVINFO_CTRL);
4767
4768 ret = rtw89_h2c_tx(rtwdev, skb, false);
4769 if (ret) {
4770 rtw89_err(rtwdev, "failed to send h2c\n");
4771 goto fail;
4772 }
4773
4774 return 0;
4775 fail:
4776 dev_kfree_skb_any(skb);
4777
4778 return ret;
4779 }
4780
4781 int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type)
4782 {
4783 struct rtw89_btc *btc = &rtwdev->btc;
4784 struct rtw89_btc_ctrl_v7 *ctrl = &btc->ctrl.ctrl_v7;
4785 struct rtw89_h2c_cxctrl_v7 *h2c;
4786 u32 len = sizeof(*h2c);
4787 struct sk_buff *skb;
4788 int ret;
4789
4790 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4791 if (!skb) {
4792 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl_v7\n");
4793 return -ENOMEM;
4794 }
4795 skb_put(skb, len);
4796 h2c = (struct rtw89_h2c_cxctrl_v7 *)skb->data;
4797
4798 h2c->hdr.type = type;
4799 h2c->hdr.ver = btc->ver->fcxctrl;
4800 h2c->hdr.len = sizeof(*h2c) - H2C_LEN_CXDRVHDR_V7;
4801 h2c->ctrl = *ctrl;
4802
4803 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4804 H2C_CAT_OUTSRC, BTFC_SET,
4805 SET_DRV_INFO, 0, 0, len);
4806
4807 ret = rtw89_h2c_tx(rtwdev, skb, false);
4808 if (ret) {
4809 rtw89_err(rtwdev, "failed to send h2c\n");
4810 goto fail;
4811 }
4812
4813 return 0;
4814 fail:
4815 dev_kfree_skb_any(skb);
4816
4817 return ret;
4818 }
4819
4820 #define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR)
4821 int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev, u8 type)
4822 {
4823 struct rtw89_btc *btc = &rtwdev->btc;
4824 struct rtw89_btc_trx_info *trx = &btc->dm.trx_info;
4825 struct sk_buff *skb;
4826 u8 *cmd;
4827 int ret;
4828
4829 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX);
4830 if (!skb) {
4831 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n");
4832 return -ENOMEM;
4833 }
4834 skb_put(skb, H2C_LEN_CXDRVINFO_TRX);
4835 cmd = skb->data;
4836
4837 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
4838 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR);
4839
4840 RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl);
4841 RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl);
4842 RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi);
4843 RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi);
4844 RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power);
4845 RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain);
4846 RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power);
4847 RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain);
4848 RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn);
4849 RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm);
4850 RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile);
4851 RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2);
4852 RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate);
4853 RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate);
4854 RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp);
4855 RTW89_SET_FWCMD_CXTRX_RXTP(cmd, trx->rx_tp);
4856 RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio);
4857
4858 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4859 H2C_CAT_OUTSRC, BTFC_SET,
4860 SET_DRV_INFO, 0, 0,
4861 H2C_LEN_CXDRVINFO_TRX);
4862
4863 ret = rtw89_h2c_tx(rtwdev, skb, false);
4864 if (ret) {
4865 rtw89_err(rtwdev, "failed to send h2c\n");
4866 goto fail;
4867 }
4868
4869 return 0;
4870 fail:
4871 dev_kfree_skb_any(skb);
4872
4873 return ret;
4874 }
4875
4876 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR)
4877 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev, u8 type)
4878 {
4879 struct rtw89_btc *btc = &rtwdev->btc;
4880 struct rtw89_btc_wl_info *wl = &btc->cx.wl;
4881 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info;
4882 struct sk_buff *skb;
4883 u8 *cmd;
4884 int ret;
4885
4886 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK);
4887 if (!skb) {
4888 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
4889 return -ENOMEM;
4890 }
4891 skb_put(skb, H2C_LEN_CXDRVINFO_RFK);
4892 cmd = skb->data;
4893
4894 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
4895 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR);
4896
4897 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state);
4898 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map);
4899 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map);
4900 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band);
4901 RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type);
4902
4903 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4904 H2C_CAT_OUTSRC, BTFC_SET,
4905 SET_DRV_INFO, 0, 0,
4906 H2C_LEN_CXDRVINFO_RFK);
4907
4908 ret = rtw89_h2c_tx(rtwdev, skb, false);
4909 if (ret) {
4910 rtw89_err(rtwdev, "failed to send h2c\n");
4911 goto fail;
4912 }
4913
4914 return 0;
4915 fail:
4916 dev_kfree_skb_any(skb);
4917
4918 return ret;
4919 }
4920
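/*
 * The packet offload add/del helpers below are synchronous: the H2C is sent
 * with rtw89_h2c_tx_and_wait() and completion is matched on a per-id/op wait
 * condition. Offload ids come from the pkt_offload bitmap, which is released
 * again on delete or on any failure after allocation.
 */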
4921 #define H2C_LEN_PKT_OFLD 4
4922 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id)
4923 {
4924 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
4925 struct sk_buff *skb;
4926 unsigned int cond;
4927 u8 *cmd;
4928 int ret;
4929
4930 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD);
4931 if (!skb) {
4932 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
4933 return -ENOMEM;
4934 }
4935 skb_put(skb, H2C_LEN_PKT_OFLD);
4936 cmd = skb->data;
4937
4938 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id);
4939 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL);
4940
4941 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4942 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4943 H2C_FUNC_PACKET_OFLD, 1, 1,
4944 H2C_LEN_PKT_OFLD);
4945
4946 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL);
4947
4948 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4949 if (ret < 0) {
4950 rtw89_debug(rtwdev, RTW89_DBG_FW,
4951 "failed to del pkt ofld: id %d, ret %d\n",
4952 id, ret);
4953 return ret;
4954 }
4955
4956 rtw89_core_release_bit_map(rtwdev->pkt_offload, id);
4957 return 0;
4958 }
4959
4960 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
4961 struct sk_buff *skb_ofld)
4962 {
4963 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
4964 struct sk_buff *skb;
4965 unsigned int cond;
4966 u8 *cmd;
4967 u8 alloc_id;
4968 int ret;
4969
4970 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload,
4971 RTW89_MAX_PKT_OFLD_NUM);
4972 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM)
4973 return -ENOSPC;
4974
4975 *id = alloc_id;
4976
4977 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len);
4978 if (!skb) {
4979 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
4980 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id);
4981 return -ENOMEM;
4982 }
4983 skb_put(skb, H2C_LEN_PKT_OFLD);
4984 cmd = skb->data;
4985
4986 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id);
4987 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD);
4988 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len);
4989 skb_put_data(skb, skb_ofld->data, skb_ofld->len);
4990
4991 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4992 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4993 H2C_FUNC_PACKET_OFLD, 1, 1,
4994 H2C_LEN_PKT_OFLD + skb_ofld->len);
4995
4996 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD);
4997
4998 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4999 if (ret < 0) {
5000 rtw89_debug(rtwdev, RTW89_DBG_FW,
5001 "failed to add pkt ofld: id %d, ret %d\n",
5002 alloc_id, ret);
5003 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id);
5004 return ret;
5005 }
5006
5007 return 0;
5008 }
5009
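/*
 * Download the scan channel list: a small chinfo header (channel count and
 * element size in 4-byte units) followed by one packed element per channel,
 * then wait for firmware to ack the ADD_SCANOFLD_CH operation.
 */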
5010 static
5011 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int ch_num,
5012 struct list_head *chan_list)
5013 {
5014 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
5015 struct rtw89_h2c_chinfo_elem *elem;
5016 struct rtw89_mac_chinfo *ch_info;
5017 struct rtw89_h2c_chinfo *h2c;
5018 struct sk_buff *skb;
5019 unsigned int cond;
5020 int skb_len;
5021 int ret;
5022
5023 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE);
5024
5025 skb_len = struct_size(h2c, elem, ch_num);
5026 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
5027 if (!skb) {
5028 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
5029 return -ENOMEM;
5030 }
5031 skb_put(skb, sizeof(*h2c));
5032 h2c = (struct rtw89_h2c_chinfo *)skb->data;
5033
5034 h2c->ch_num = ch_num;
5035 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */
5036
5037 list_for_each_entry(ch_info, chan_list, list) {
5038 elem = (struct rtw89_h2c_chinfo_elem *)skb_put(skb, sizeof(*elem));
5039
5040 elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_W0_PERIOD) |
5041 le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_W0_DWELL) |
5042 le32_encode_bits(ch_info->central_ch, RTW89_H2C_CHINFO_W0_CENTER_CH) |
5043 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_W0_PRI_CH);
5044
5045 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_W1_BW) |
5046 le32_encode_bits(ch_info->notify_action, RTW89_H2C_CHINFO_W1_ACTION) |
5047 le32_encode_bits(ch_info->num_pkt, RTW89_H2C_CHINFO_W1_NUM_PKT) |
5048 le32_encode_bits(ch_info->tx_pkt, RTW89_H2C_CHINFO_W1_TX) |
5049 le32_encode_bits(ch_info->pause_data, RTW89_H2C_CHINFO_W1_PAUSE_DATA) |
5050 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_W1_BAND) |
5051 le32_encode_bits(ch_info->probe_id, RTW89_H2C_CHINFO_W1_PKT_ID) |
5052 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_W1_DFS) |
5053 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_W1_TX_NULL) |
5054 le32_encode_bits(ch_info->rand_seq_num, RTW89_H2C_CHINFO_W1_RANDOM);
5055
5056 elem->w2 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_W2_PKT0) |
5057 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_W2_PKT1) |
5058 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_W2_PKT2) |
5059 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_W2_PKT3);
5060
5061 elem->w3 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_W3_PKT4) |
5062 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_W3_PKT5) |
5063 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_W3_PKT6) |
5064 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_W3_PKT7);
5065 }
5066
5067 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5068 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
5069 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
5070
5071 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH;
5072
5073 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5074 if (ret) {
5075 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n");
5076 return ret;
5077 }
5078
5079 return 0;
5080 }
5081
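/*
 * BE-generation variant of the scan channel list command. The element
 * layout differs (rtw89_h2c_chinfo_elem_be), and unless the firmware
 * advertises CH_INFO_BE_V0 the per-channel period is carried in word 7
 * (PERIOD_V1) instead of word 0.
 */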
5082 static
5083 int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
5084 struct list_head *chan_list,
5085 struct rtw89_vif_link *rtwvif_link)
5086 {
5087 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
5088 struct rtw89_h2c_chinfo_elem_be *elem;
5089 struct rtw89_mac_chinfo_be *ch_info;
5090 struct rtw89_h2c_chinfo_be *h2c;
5091 struct sk_buff *skb;
5092 unsigned int cond;
5093 u8 ver = U8_MAX;
5094 int skb_len;
5095 int ret;
5096
5097 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE_BE);
5098
5099 skb_len = struct_size(h2c, elem, ch_num);
5100 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
5101 if (!skb) {
5102 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
5103 return -ENOMEM;
5104 }
5105
5106 if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw))
5107 ver = 0;
5108
5109 skb_put(skb, sizeof(*h2c));
5110 h2c = (struct rtw89_h2c_chinfo_be *)skb->data;
5111
5112 h2c->ch_num = ch_num;
5113 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */
5114 h2c->arg = u8_encode_bits(rtwvif_link->mac_idx,
5115 RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK);
5116
5117 list_for_each_entry(ch_info, chan_list, list) {
5118 elem = (struct rtw89_h2c_chinfo_elem_be *)skb_put(skb, sizeof(*elem));
5119
5120 elem->w0 = le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) |
5121 le32_encode_bits(ch_info->central_ch,
5122 RTW89_H2C_CHINFO_BE_W0_CENTER_CH) |
5123 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_BE_W0_PRI_CH);
5124
5125 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_BE_W1_BW) |
5126 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_BE_W1_CH_BAND) |
5127 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_BE_W1_DFS) |
5128 le32_encode_bits(ch_info->pause_data,
5129 RTW89_H2C_CHINFO_BE_W1_PAUSE_DATA) |
5130 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_BE_W1_TX_NULL) |
5131 le32_encode_bits(ch_info->rand_seq_num,
5132 RTW89_H2C_CHINFO_BE_W1_RANDOM) |
5133 le32_encode_bits(ch_info->notify_action,
5134 RTW89_H2C_CHINFO_BE_W1_NOTIFY) |
5135 le32_encode_bits(ch_info->probe_id != 0xff ? 1 : 0,
5136 RTW89_H2C_CHINFO_BE_W1_PROBE) |
5137 le32_encode_bits(ch_info->leave_crit,
5138 RTW89_H2C_CHINFO_BE_W1_EARLY_LEAVE_CRIT) |
5139 le32_encode_bits(ch_info->chkpt_timer,
5140 RTW89_H2C_CHINFO_BE_W1_CHKPT_TIMER);
5141
5142 elem->w2 = le32_encode_bits(ch_info->leave_time,
5143 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TIME) |
5144 le32_encode_bits(ch_info->leave_th,
5145 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TH) |
5146 le32_encode_bits(ch_info->tx_pkt_ctrl,
5147 RTW89_H2C_CHINFO_BE_W2_TX_PKT_CTRL);
5148
5149 elem->w3 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_BE_W3_PKT0) |
5150 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_BE_W3_PKT1) |
5151 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_BE_W3_PKT2) |
5152 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_BE_W3_PKT3);
5153
5154 elem->w4 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_BE_W4_PKT4) |
5155 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_BE_W4_PKT5) |
5156 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_BE_W4_PKT6) |
5157 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_BE_W4_PKT7);
5158
5159 elem->w5 = le32_encode_bits(ch_info->sw_def, RTW89_H2C_CHINFO_BE_W5_SW_DEF) |
5160 le32_encode_bits(ch_info->fw_probe0_ssids,
5161 RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS);
5162
5163 elem->w6 = le32_encode_bits(ch_info->fw_probe0_shortssids,
5164 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS) |
5165 le32_encode_bits(ch_info->fw_probe0_bssids,
5166 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS);
5167 if (ver == 0)
5168 elem->w0 |=
5169 le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD);
5170 else
5171 elem->w7 = le32_encode_bits(ch_info->period,
5172 RTW89_H2C_CHINFO_BE_W7_PERIOD_V1);
5173 }
5174
5175 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5176 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
5177 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
5178
5179 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH;
5180
5181 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5182 if (ret) {
5183 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n");
5184 return ret;
5185 }
5186
5187 return 0;
5188 }
5189
5190 #define RTW89_SCAN_DELAY_TSF_UNIT 104800
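/*
 * Start or stop the AX scan offload in firmware. When option->delay is set,
 * the port TSF is read and the start is deferred by
 * delay * RTW89_SCAN_DELAY_TSF_UNIT; if the TSF read fails, the scan falls
 * back to starting immediately.
 */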
5191 int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev,
5192 struct rtw89_scan_option *option,
5193 struct rtw89_vif_link *rtwvif_link,
5194 bool wowlan)
5195 {
5196 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
5197 struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
5198 enum rtw89_scan_mode scan_mode = RTW89_SCAN_IMMEDIATE;
5199 struct rtw89_h2c_scanofld *h2c;
5200 u32 len = sizeof(*h2c);
5201 struct sk_buff *skb;
5202 unsigned int cond;
5203 u64 tsf = 0;
5204 int ret;
5205
5206 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5207 if (!skb) {
5208 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
5209 return -ENOMEM;
5210 }
5211 skb_put(skb, len);
5212 h2c = (struct rtw89_h2c_scanofld *)skb->data;
5213
5214 if (option->delay) {
5215 ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf);
5216 if (ret) {
5217 rtw89_warn(rtwdev, "NLO failed to get port tsf: %d\n", ret);
5218 scan_mode = RTW89_SCAN_IMMEDIATE;
5219 } else {
5220 scan_mode = RTW89_SCAN_DELAY;
5221 tsf += (u64)option->delay * RTW89_SCAN_DELAY_TSF_UNIT;
5222 }
5223 }
5224
5225 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) |
5226 le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) |
5227 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_SCANOFLD_W0_BAND) |
5228 le32_encode_bits(option->enable, RTW89_H2C_SCANOFLD_W0_OPERATION);
5229
5230 h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) |
5231 le32_encode_bits(option->target_ch_mode,
5232 RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) |
5233 le32_encode_bits(scan_mode, RTW89_H2C_SCANOFLD_W1_START_MODE) |
5234 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE);
5235
5236 h2c->w2 = le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_W2_NORM_PD) |
5237 le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_W2_SLOW_PD);
5238
5239 if (option->target_ch_mode) {
5240 h2c->w1 |= le32_encode_bits(op->band_width,
5241 RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) |
5242 le32_encode_bits(op->primary_channel,
5243 RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) |
5244 le32_encode_bits(op->channel,
5245 RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH);
5246 h2c->w0 |= le32_encode_bits(op->band_type,
5247 RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND);
5248 }
5249
5250 h2c->tsf_high = le32_encode_bits(upper_32_bits(tsf),
5251 RTW89_H2C_SCANOFLD_W3_TSF_HIGH);
5252 h2c->tsf_low = le32_encode_bits(lower_32_bits(tsf),
5253 RTW89_H2C_SCANOFLD_W4_TSF_LOW);
5254
5255 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5256 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
5257 H2C_FUNC_SCANOFLD, 1, 1,
5258 len);
5259
5260 if (option->enable)
5261 cond = RTW89_SCANOFLD_WAIT_COND_START;
5262 else
5263 cond = RTW89_SCANOFLD_WAIT_COND_STOP;
5264
5265 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5266 if (ret) {
5267 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n");
5268 return ret;
5269 }
5270
5271 return 0;
5272 }
5273
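/*
 * Collect regulatory-disabled 6 GHz channels into option->prohib_chan, one
 * bit per group of four channel numbers; with no 6 GHz sband at all, every
 * bit is set, presumably telling firmware to skip the whole band.
 */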
5274 static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev,
5275 struct rtw89_scan_option *option)
5276 {
5277 struct ieee80211_supported_band *sband;
5278 struct ieee80211_channel *chan;
5279 u8 i, idx;
5280
5281 sband = rtwdev->hw->wiphy->bands[NL80211_BAND_6GHZ];
5282 if (!sband) {
5283 option->prohib_chan = U64_MAX;
5284 return;
5285 }
5286
5287 for (i = 0; i < sband->n_channels; i++) {
5288 chan = &sband->channels[i];
5289 if (chan->flags & IEEE80211_CHAN_DISABLED) {
5290 idx = (chan->hw_value - 1) / 4;
5291 option->prohib_chan |= BIT(idx);
5292 }
5293 }
5294 }
5295
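/*
 * BE-generation scan offload request. Behind the fixed configuration words
 * it appends the flexible macc_role and operating-channel (opch) arrays;
 * their element sizes are advertised in w9 unless only SCAN_OFFLOAD_BE_V0
 * is supported, in which case the config part stops at w8.
 */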
5296 int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
5297 struct rtw89_scan_option *option,
5298 struct rtw89_vif_link *rtwvif_link,
5299 bool wowlan)
5300 {
5301 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
5302 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
5303 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
5304 struct cfg80211_scan_request *req = rtwvif->scan_req;
5305 struct rtw89_h2c_scanofld_be_macc_role *macc_role;
5306 struct rtw89_chan *op = &scan_info->op_chan;
5307 struct rtw89_h2c_scanofld_be_opch *opch;
5308 struct rtw89_pktofld_info *pkt_info;
5309 struct rtw89_h2c_scanofld_be *h2c;
5310 struct sk_buff *skb;
5311 u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role;
5312 u8 opch_size = sizeof(*opch) * option->num_opch;
5313 u8 probe_id[NUM_NL80211_BANDS];
5314 u8 scan_offload_ver = U8_MAX;
5315 u8 cfg_len = sizeof(*h2c);
5316 unsigned int cond;
5317 u8 ver = U8_MAX;
5318 void *ptr;
5319 int ret;
5320 u32 len;
5321 u8 i;
5322
5323 rtw89_scan_get_6g_disabled_chan(rtwdev, option);
5324
5325 if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) {
5326 cfg_len = offsetofend(typeof(*h2c), w8);
5327 scan_offload_ver = 0;
5328 }
5329
5330 len = cfg_len + macc_role_size + opch_size;
5331 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5332 if (!skb) {
5333 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
5334 return -ENOMEM;
5335 }
5336
5337 skb_put(skb, len);
5338 h2c = (struct rtw89_h2c_scanofld_be *)skb->data;
5339 ptr = skb->data;
5340
5341 memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id));
5342
5343 if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw))
5344 ver = 0;
5345
5346 if (!wowlan) {
5347 list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) {
5348 if (pkt_info->wildcard_6ghz) {
5349 /* Provide wildcard as template */
5350 probe_id[NL80211_BAND_6GHZ] = pkt_info->id;
5351 break;
5352 }
5353 }
5354 }
5355
5356 h2c->w0 = le32_encode_bits(option->operation, RTW89_H2C_SCANOFLD_BE_W0_OP) |
5357 le32_encode_bits(option->scan_mode,
5358 RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE) |
5359 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_BE_W0_REPEAT) |
5360 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_NOTIFY_END) |
5361 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_LEARN_CH) |
5362 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_BE_W0_MACID) |
5363 le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_BE_W0_PORT) |
5364 le32_encode_bits(option->band, RTW89_H2C_SCANOFLD_BE_W0_BAND);
5365
5366 h2c->w1 = le32_encode_bits(option->num_macc_role, RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE) |
5367 le32_encode_bits(option->num_opch, RTW89_H2C_SCANOFLD_BE_W1_NUM_OP) |
5368 le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_BE_W1_NORM_PD);
5369
5370 h2c->w2 = le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_BE_W2_SLOW_PD) |
5371 le32_encode_bits(option->norm_cy, RTW89_H2C_SCANOFLD_BE_W2_NORM_CY) |
5372 le32_encode_bits(option->opch_end, RTW89_H2C_SCANOFLD_BE_W2_OPCH_END);
5373
5374 h2c->w3 = le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SSID) |
5375 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SHORT_SSID) |
5376 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_BSSID) |
5377 le32_encode_bits(probe_id[NL80211_BAND_2GHZ], RTW89_H2C_SCANOFLD_BE_W3_PROBEID);
5378
5379 h2c->w4 = le32_encode_bits(probe_id[NL80211_BAND_5GHZ],
5380 RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G) |
5381 le32_encode_bits(probe_id[NL80211_BAND_6GHZ],
5382 RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G) |
5383 le32_encode_bits(option->delay, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START);
5384
5385 h2c->w5 = le32_encode_bits(option->mlo_mode, RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE);
5386
5387 h2c->w6 = le32_encode_bits(option->prohib_chan,
5388 RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW);
5389 h2c->w7 = le32_encode_bits(option->prohib_chan >> 32,
5390 RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH);
5391 if (!wowlan && req->no_cck) {
5392 h2c->w0 |= le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_PROBE_WITH_RATE);
5393 h2c->w8 = le32_encode_bits(RTW89_HW_RATE_OFDM6,
5394 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ) |
5395 le32_encode_bits(RTW89_HW_RATE_OFDM6,
5396 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_5GHZ) |
5397 le32_encode_bits(RTW89_HW_RATE_OFDM6,
5398 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ);
5399 }
5400
5401 if (scan_offload_ver == 0)
5402 goto flex_member;
5403
5404 h2c->w9 = le32_encode_bits(sizeof(*h2c) / sizeof(h2c->w0),
5405 RTW89_H2C_SCANOFLD_BE_W9_SIZE_CFG) |
5406 le32_encode_bits(sizeof(*macc_role) / sizeof(macc_role->w0),
5407 RTW89_H2C_SCANOFLD_BE_W9_SIZE_MACC) |
5408 le32_encode_bits(sizeof(*opch) / sizeof(opch->w0),
5409 RTW89_H2C_SCANOFLD_BE_W9_SIZE_OP);
5410
5411 flex_member:
5412 ptr += cfg_len;
5413
5414 for (i = 0; i < option->num_macc_role; i++) {
5415 macc_role = ptr;
5416 macc_role->w0 =
5417 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND) |
5418 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT) |
5419 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_MACID) |
5420 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_OPCH_END);
5421 ptr += sizeof(*macc_role);
5422 }
5423
5424 for (i = 0; i < option->num_opch; i++) {
5425 opch = ptr;
5426 opch->w0 = le32_encode_bits(rtwvif_link->mac_id,
5427 RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID) |
5428 le32_encode_bits(option->band,
5429 RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND) |
5430 le32_encode_bits(rtwvif_link->port,
5431 RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT) |
5432 le32_encode_bits(RTW89_SCAN_OPMODE_INTV,
5433 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY) |
5434 le32_encode_bits(true,
5435 RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL) |
5436 le32_encode_bits(RTW89_OFF_CHAN_TIME / 10,
5437 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL);
5438
5439 opch->w1 = le32_encode_bits(op->band_type,
5440 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) |
5441 le32_encode_bits(op->band_width,
5442 RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) |
5443 le32_encode_bits(0x3,
5444 RTW89_H2C_SCANOFLD_BE_OPCH_W1_NOTIFY) |
5445 le32_encode_bits(op->primary_channel,
5446 RTW89_H2C_SCANOFLD_BE_OPCH_W1_PRI_CH) |
5447 le32_encode_bits(op->channel,
5448 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CENTRAL_CH);
5449
5450 opch->w2 = le32_encode_bits(0,
5451 RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL) |
5452 le32_encode_bits(0,
5453 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF) |
5454 le32_encode_bits(2,
5455 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS);
5456
5457 opch->w3 = le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
5458 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0) |
5459 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
5460 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1) |
5461 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
5462 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2) |
5463 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
5464 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3);
5465
5466 if (ver == 0)
5467 opch->w1 |= le32_encode_bits(RTW89_CHANNEL_TIME,
5468 RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION);
5469 else
5470 opch->w4 = le32_encode_bits(RTW89_CHANNEL_TIME,
5471 RTW89_H2C_SCANOFLD_BE_OPCH_W4_DURATION_V1);
5472 ptr += sizeof(*opch);
5473 }
5474
5475 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5476 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
5477 H2C_FUNC_SCANOFLD_BE, 1, 1,
5478 len);
5479
5480 if (option->enable)
5481 cond = RTW89_SCANOFLD_BE_WAIT_COND_START;
5482 else
5483 cond = RTW89_SCANOFLD_BE_WAIT_COND_STOP;
5484
5485 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5486 if (ret) {
5487 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan be ofld\n");
5488 return ret;
5489 }
5490
5491 return 0;
5492 }
5493
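/*
 * Push one page of pre-built RF register writes to firmware; the H2C class
 * selects RF path A or B based on info->rf_path.
 */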
5494 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
5495 struct rtw89_fw_h2c_rf_reg_info *info,
5496 u16 len, u8 page)
5497 {
5498 struct sk_buff *skb;
5499 u8 class = info->rf_path == RF_PATH_A ?
5500 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B;
5501 int ret;
5502
5503 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5504 if (!skb) {
5505 rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n");
5506 return -ENOMEM;
5507 }
5508 skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len);
5509
5510 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5511 H2C_CAT_OUTSRC, class, page, 0, 0,
5512 len);
5513
5514 ret = rtw89_h2c_tx(rtwdev, skb, false);
5515 if (ret) {
5516 rtw89_err(rtwdev, "failed to send h2c\n");
5517 goto fail;
5518 }
5519
5520 return 0;
5521 fail:
5522 dev_kfree_skb_any(skb);
5523
5524 return ret;
5525 }
5526
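/*
 * Report the current MCC (multi-channel concurrency) channel pair to the RF
 * firmware; the entry selected by table_idx is flagged as the channel that
 * is active right now.
 */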
5527 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
5528 {
5529 struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
5530 struct rtw89_fw_h2c_rf_get_mccch *mccch;
5531 struct sk_buff *skb;
5532 int ret;
5533 u8 idx;
5534
5535 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch));
5536 if (!skb) {
5537 rtw89_err(rtwdev, "failed to alloc skb for h2c rf mcc notify\n");
5538 return -ENOMEM;
5539 }
5540 skb_put(skb, sizeof(*mccch));
5541 mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data;
5542
5543 idx = rfk_mcc->table_idx;
5544 mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]);
5545 mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]);
5546 mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]);
5547 mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]);
5548 mccch->current_channel = cpu_to_le32(rfk_mcc->ch[idx]);
5549 mccch->current_band_type = cpu_to_le32(rfk_mcc->band[idx]);
5550
5551 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5552 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY,
5553 H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0,
5554 sizeof(*mccch));
5555
5556 ret = rtw89_h2c_tx(rtwdev, skb, false);
5557 if (ret) {
5558 rtw89_err(rtwdev, "failed to send h2c\n");
5559 goto fail;
5560 }
5561
5562 return 0;
5563 fail:
5564 dev_kfree_skb_any(skb);
5565
5566 return ret;
5567 }
5568 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc);
5569
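/*
 * Pre-RFK notification: hand firmware the per-path channel/band tables from
 * rtwdev->rfk_mcc before RF calibration runs. Three layouts exist (v0, v1
 * and the current one that adds per-path bandwidth), selected by the
 * RFK_PRE_NOTIFY_* firmware feature flags.
 */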
5570 int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
5571 enum rtw89_phy_idx phy_idx)
5572 {
5573 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
5574 struct rtw89_fw_h2c_rfk_pre_info_common *common;
5575 struct rtw89_fw_h2c_rfk_pre_info_v0 *h2c_v0;
5576 struct rtw89_fw_h2c_rfk_pre_info_v1 *h2c_v1;
5577 struct rtw89_fw_h2c_rfk_pre_info *h2c;
5578 u8 tbl_sel[NUM_OF_RTW89_FW_RFK_PATH];
5579 u32 len = sizeof(*h2c);
5580 struct sk_buff *skb;
5581 u8 ver = U8_MAX;
5582 u8 tbl, path;
5583 u32 val32;
5584 int ret;
5585
5586 if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V1, &rtwdev->fw)) {
5587 len = sizeof(*h2c_v1);
5588 ver = 1;
5589 } else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) {
5590 len = sizeof(*h2c_v0);
5591 ver = 0;
5592 }
5593
5594 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5595 if (!skb) {
5596 rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy\n");
5597 return -ENOMEM;
5598 }
5599 skb_put(skb, len);
5600 h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data;
5601 common = &h2c->base_v1.common;
5602
5603 common->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
5604
5605 BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR);
5606 BUILD_BUG_ON(ARRAY_SIZE(rfk_mcc->data) < NUM_OF_RTW89_FW_RFK_PATH);
5607
5608 for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) {
5609 for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
5610 common->dbcc.ch[path][tbl] =
5611 cpu_to_le32(rfk_mcc->data[path].ch[tbl]);
5612 common->dbcc.band[path][tbl] =
5613 cpu_to_le32(rfk_mcc->data[path].band[tbl]);
5614 }
5615 }
5616
5617 for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
5618 tbl_sel[path] = rfk_mcc->data[path].table_idx;
5619
5620 common->tbl.cur_ch[path] =
5621 cpu_to_le32(rfk_mcc->data[path].ch[tbl_sel[path]]);
5622 common->tbl.cur_band[path] =
5623 cpu_to_le32(rfk_mcc->data[path].band[tbl_sel[path]]);
5624
5625 if (ver <= 1)
5626 continue;
5627
5628 h2c->cur_bandwidth[path] =
5629 cpu_to_le32(rfk_mcc->data[path].bw[tbl_sel[path]]);
5630 }
5631
5632 common->phy_idx = cpu_to_le32(phy_idx);
5633
5634 if (ver == 0) { /* RFK_PRE_NOTIFY_V0 */
5635 h2c_v0 = (struct rtw89_fw_h2c_rfk_pre_info_v0 *)skb->data;
5636
5637 h2c_v0->cur_band = cpu_to_le32(rfk_mcc->data[0].band[tbl_sel[0]]);
5638 h2c_v0->cur_bw = cpu_to_le32(rfk_mcc->data[0].bw[tbl_sel[0]]);
5639 h2c_v0->cur_center_ch = cpu_to_le32(rfk_mcc->data[0].ch[tbl_sel[0]]);
5640
5641 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1);
5642 h2c_v0->ktbl_sel0 = cpu_to_le32(val32);
5643 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1);
5644 h2c_v0->ktbl_sel1 = cpu_to_le32(val32);
5645 val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
5646 h2c_v0->rfmod0 = cpu_to_le32(val32);
5647 val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK);
5648 h2c_v0->rfmod1 = cpu_to_le32(val32);
5649
5650 if (rtw89_is_mlo_1_1(rtwdev))
5651 h2c_v0->mlo_1_1 = cpu_to_le32(1);
5652
5653 h2c_v0->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type);
5654
5655 goto done;
5656 }
5657
5658 if (rtw89_is_mlo_1_1(rtwdev)) {
5659 h2c_v1 = &h2c->base_v1;
5660 h2c_v1->mlo_1_1 = cpu_to_le32(1);
5661 }
5662 done:
5663 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5664 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
5665 H2C_FUNC_RFK_PRE_NOTIFY, 0, 0,
5666 len);
5667
5668 ret = rtw89_h2c_tx(rtwdev, skb, false);
5669 if (ret) {
5670 rtw89_err(rtwdev, "failed to send h2c\n");
5671 goto fail;
5672 }
5673
5674 return 0;
5675 fail:
5676 dev_kfree_skb_any(skb);
5677
5678 return ret;
5679 }
5680
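/*
 * The rtw89_fw_h2c_rf_* helpers below trigger the individual
 * firmware-offloaded RF calibrations (TSSI, IQK, DPK, TX gap-K, DACK and
 * RX DCK). Each builds its calibration-specific H2C, most carrying the
 * current channel parameters, and sends it without waiting for a
 * completion event.
 */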
5681 int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
5682 const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode)
5683 {
5684 struct rtw89_hal *hal = &rtwdev->hal;
5685 struct rtw89_h2c_rf_tssi *h2c;
5686 u32 len = sizeof(*h2c);
5687 struct sk_buff *skb;
5688 int ret;
5689
5690 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5691 if (!skb) {
5692 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TSSI\n");
5693 return -ENOMEM;
5694 }
5695 skb_put(skb, len);
5696 h2c = (struct rtw89_h2c_rf_tssi *)skb->data;
5697
5698 h2c->len = cpu_to_le16(len);
5699 h2c->phy = phy_idx;
5700 h2c->ch = chan->channel;
5701 h2c->bw = chan->band_width;
5702 h2c->band = chan->band_type;
5703 h2c->hwtx_en = true;
5704 h2c->cv = hal->cv;
5705 h2c->tssi_mode = tssi_mode;
5706
5707 rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(rtwdev, phy_idx, chan, h2c);
5708 rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(rtwdev, phy_idx, chan, h2c);
5709
5710 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5711 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
5712 H2C_FUNC_RFK_TSSI_OFFLOAD, 0, 0, len);
5713
5714 ret = rtw89_h2c_tx(rtwdev, skb, false);
5715 if (ret) {
5716 rtw89_err(rtwdev, "failed to send h2c\n");
5717 goto fail;
5718 }
5719
5720 return 0;
5721 fail:
5722 dev_kfree_skb_any(skb);
5723
5724 return ret;
5725 }
5726
5727 int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
5728 const struct rtw89_chan *chan)
5729 {
5730 struct rtw89_h2c_rf_iqk *h2c;
5731 u32 len = sizeof(*h2c);
5732 struct sk_buff *skb;
5733 int ret;
5734
5735 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5736 if (!skb) {
5737 rtw89_err(rtwdev, "failed to alloc skb for h2c RF IQK\n");
5738 return -ENOMEM;
5739 }
5740 skb_put(skb, len);
5741 h2c = (struct rtw89_h2c_rf_iqk *)skb->data;
5742
5743 h2c->phy_idx = cpu_to_le32(phy_idx);
5744 h2c->dbcc = cpu_to_le32(rtwdev->dbcc_en);
5745
5746 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5747 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
5748 H2C_FUNC_RFK_IQK_OFFLOAD, 0, 0, len);
5749
5750 ret = rtw89_h2c_tx(rtwdev, skb, false);
5751 if (ret) {
5752 rtw89_err(rtwdev, "failed to send h2c\n");
5753 goto fail;
5754 }
5755
5756 return 0;
5757 fail:
5758 dev_kfree_skb_any(skb);
5759
5760 return ret;
5761 }
5762
5763 int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
5764 const struct rtw89_chan *chan)
5765 {
5766 struct rtw89_h2c_rf_dpk *h2c;
5767 u32 len = sizeof(*h2c);
5768 struct sk_buff *skb;
5769 int ret;
5770
5771 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5772 if (!skb) {
5773 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DPK\n");
5774 return -ENOMEM;
5775 }
5776 skb_put(skb, len);
5777 h2c = (struct rtw89_h2c_rf_dpk *)skb->data;
5778
5779 h2c->len = len;
5780 h2c->phy = phy_idx;
5781 h2c->dpk_enable = true;
5782 h2c->kpath = RF_AB;
5783 h2c->cur_band = chan->band_type;
5784 h2c->cur_bw = chan->band_width;
5785 h2c->cur_ch = chan->channel;
5786 h2c->dpk_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK);
5787
5788 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5789 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
5790 H2C_FUNC_RFK_DPK_OFFLOAD, 0, 0, len);
5791
5792 ret = rtw89_h2c_tx(rtwdev, skb, false);
5793 if (ret) {
5794 rtw89_err(rtwdev, "failed to send h2c\n");
5795 goto fail;
5796 }
5797
5798 return 0;
5799 fail:
5800 dev_kfree_skb_any(skb);
5801
5802 return ret;
5803 }
5804
5805 int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
5806 const struct rtw89_chan *chan)
5807 {
5808 struct rtw89_hal *hal = &rtwdev->hal;
5809 struct rtw89_h2c_rf_txgapk *h2c;
5810 u32 len = sizeof(*h2c);
5811 struct sk_buff *skb;
5812 int ret;
5813
5814 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5815 if (!skb) {
5816 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TXGAPK\n");
5817 return -ENOMEM;
5818 }
5819 skb_put(skb, len);
5820 h2c = (struct rtw89_h2c_rf_txgapk *)skb->data;
5821
5822 h2c->len = len;
5823 h2c->ktype = 2;
5824 h2c->phy = phy_idx;
5825 h2c->kpath = RF_AB;
5826 h2c->band = chan->band_type;
5827 h2c->bw = chan->band_width;
5828 h2c->ch = chan->channel;
5829 h2c->cv = hal->cv;
5830
5831 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5832 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
5833 H2C_FUNC_RFK_TXGAPK_OFFLOAD, 0, 0, len);
5834
5835 ret = rtw89_h2c_tx(rtwdev, skb, false);
5836 if (ret) {
5837 rtw89_err(rtwdev, "failed to send h2c\n");
5838 goto fail;
5839 }
5840
5841 return 0;
5842 fail:
5843 dev_kfree_skb_any(skb);
5844
5845 return ret;
5846 }
5847
5848 int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
5849 const struct rtw89_chan *chan)
5850 {
5851 struct rtw89_h2c_rf_dack *h2c;
5852 u32 len = sizeof(*h2c);
5853 struct sk_buff *skb;
5854 int ret;
5855
5856 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5857 if (!skb) {
5858 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DACK\n");
5859 return -ENOMEM;
5860 }
5861 skb_put(skb, len);
5862 h2c = (struct rtw89_h2c_rf_dack *)skb->data;
5863
5864 h2c->len = cpu_to_le32(len);
5865 h2c->phy = cpu_to_le32(phy_idx);
5866 h2c->type = cpu_to_le32(0);
5867
5868 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5869 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
5870 H2C_FUNC_RFK_DACK_OFFLOAD, 0, 0, len);
5871
5872 ret = rtw89_h2c_tx(rtwdev, skb, false);
5873 if (ret) {
5874 rtw89_err(rtwdev, "failed to send h2c\n");
5875 goto fail;
5876 }
5877
5878 return 0;
5879 fail:
5880 dev_kfree_skb_any(skb);
5881
5882 return ret;
5883 }
5884
5885 int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
5886 const struct rtw89_chan *chan, bool is_chl_k)
5887 {
5888 struct rtw89_h2c_rf_rxdck_v0 *v0;
5889 struct rtw89_h2c_rf_rxdck *h2c;
5890 u32 len = sizeof(*h2c);
5891 struct sk_buff *skb;
5892 int ver = -1;
5893 int ret;
5894
5895 if (RTW89_CHK_FW_FEATURE(RFK_RXDCK_V0, &rtwdev->fw)) {
5896 len = sizeof(*v0);
5897 ver = 0;
5898 }
5899
5900 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5901 if (!skb) {
5902 rtw89_err(rtwdev, "failed to alloc skb for h2c RF RXDCK\n");
5903 return -ENOMEM;
5904 }
5905 skb_put(skb, len);
5906 v0 = (struct rtw89_h2c_rf_rxdck_v0 *)skb->data;
5907
5908 v0->len = len;
5909 v0->phy = phy_idx;
5910 v0->is_afe = false;
5911 v0->kpath = RF_AB;
5912 v0->cur_band = chan->band_type;
5913 v0->cur_bw = chan->band_width;
5914 v0->cur_ch = chan->channel;
5915 v0->rxdck_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK);
5916
5917 if (ver == 0)
5918 goto hdr;
5919
5920 h2c = (struct rtw89_h2c_rf_rxdck *)skb->data;
5921 h2c->is_chl_k = is_chl_k;
5922
5923 hdr:
5924 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5925 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
5926 H2C_FUNC_RFK_RXDCK_OFFLOAD, 0, 0, len);
5927
5928 ret = rtw89_h2c_tx(rtwdev, skb, false);
5929 if (ret) {
5930 rtw89_err(rtwdev, "failed to send h2c\n");
5931 goto fail;
5932 }
5933
5934 return 0;
5935 fail:
5936 dev_kfree_skb_any(skb);
5937
5938 return ret;
5939 }
5940
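/*
 * Send a caller-supplied payload as an OUTSRC H2C with a standard header;
 * class, function and the rack/dack flags come straight from the arguments.
 */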
5941 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
5942 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
5943 bool rack, bool dack)
5944 {
5945 struct sk_buff *skb;
5946 int ret;
5947
5948 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5949 if (!skb) {
5950 rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n");
5951 return -ENOMEM;
5952 }
5953 skb_put_data(skb, buf, len);
5954
5955 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5956 H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack,
5957 len);
5958
5959 ret = rtw89_h2c_tx(rtwdev, skb, false);
5960 if (ret) {
5961 rtw89_err(rtwdev, "failed to send h2c\n");
5962 goto fail;
5963 }
5964
5965 return 0;
5966 fail:
5967 dev_kfree_skb_any(skb);
5968
5969 return ret;
5970 }
5971
5972 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len)
5973 {
5974 struct sk_buff *skb;
5975 int ret;
5976
5977 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len);
5978 if (!skb) {
5979 rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n");
5980 return -ENOMEM;
5981 }
5982 skb_put_data(skb, buf, len);
5983
5984 ret = rtw89_h2c_tx(rtwdev, skb, false);
5985 if (ret) {
5986 rtw89_err(rtwdev, "failed to send h2c\n");
5987 goto fail;
5988 }
5989
5990 return 0;
5991 fail:
5992 dev_kfree_skb_any(skb);
5993
5994 return ret;
5995 }
5996
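/*
 * The early_h2c_list holds raw H2Cs that were queued ahead of time
 * (typically before the firmware was ready); replay the whole backlog here,
 * or free it on teardown.
 */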
5997 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev)
5998 {
5999 struct rtw89_early_h2c *early_h2c;
6000
6001 lockdep_assert_held(&rtwdev->mutex);
6002
6003 list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) {
6004 rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len);
6005 }
6006 }
6007
6008 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
6009 {
6010 struct rtw89_early_h2c *early_h2c, *tmp;
6011
6012 mutex_lock(&rtwdev->mutex);
6013 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) {
6014 list_del(&early_h2c->list);
6015 kfree(early_h2c->h2c);
6016 kfree(early_h2c);
6017 }
6018 mutex_unlock(&rtwdev->mutex);
6019 }
6020
6021 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h)
6022 {
6023 const struct rtw89_c2h_hdr *hdr = (const struct rtw89_c2h_hdr *)c2h->data;
6024 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);
6025
6026 attr->category = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CATEGORY);
6027 attr->class = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CLASS);
6028 attr->func = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_FUNC);
6029 attr->len = le32_get_bits(hdr->w1, RTW89_C2H_HDR_W1_LEN);
6030 }
6031
6032 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev,
6033 struct sk_buff *c2h)
6034 {
6035 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);
6036 u8 category = attr->category;
6037 u8 class = attr->class;
6038 u8 func = attr->func;
6039
6040 switch (category) {
6041 default:
6042 return false;
6043 case RTW89_C2H_CAT_MAC:
6044 return rtw89_mac_c2h_chk_atomic(rtwdev, c2h, class, func);
6045 case RTW89_C2H_CAT_OUTSRC:
6046 return rtw89_phy_c2h_chk_atomic(rtwdev, class, func);
6047 }
6048 }
6049
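/*
 * Entry point for C2H events received from the interrupt path. Events that
 * the MAC/PHY layers mark as atomic-safe are handled right here; everything
 * else is queued to c2h_work and handled later under rtwdev->mutex.
 */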
6050 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h)
6051 {
6052 rtw89_fw_c2h_parse_attr(c2h);
6053 if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h))
6054 goto enqueue;
6055
6056 rtw89_fw_c2h_cmd_handle(rtwdev, c2h);
6057 dev_kfree_skb_any(c2h);
6058 return;
6059
6060 enqueue:
6061 skb_queue_tail(&rtwdev->c2h_queue, c2h);
6062 ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
6063 }
6064
6065 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
6066 struct sk_buff *skb)
6067 {
6068 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb);
6069 u8 category = attr->category;
6070 u8 class = attr->class;
6071 u8 func = attr->func;
6072 u16 len = attr->len;
6073 bool dump = true;
6074
6075 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
6076 return;
6077
6078 switch (category) {
6079 case RTW89_C2H_CAT_TEST:
6080 break;
6081 case RTW89_C2H_CAT_MAC:
6082 rtw89_mac_c2h_handle(rtwdev, skb, len, class, func);
6083 if (class == RTW89_MAC_C2H_CLASS_INFO &&
6084 func == RTW89_MAC_C2H_FUNC_C2H_LOG)
6085 dump = false;
6086 break;
6087 case RTW89_C2H_CAT_OUTSRC:
6088 if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN &&
6089 class <= RTW89_PHY_C2H_CLASS_BTC_MAX)
6090 rtw89_btc_c2h_handle(rtwdev, skb, len, class, func);
6091 else
6092 rtw89_phy_c2h_handle(rtwdev, skb, len, class, func);
6093 break;
6094 }
6095
6096 if (dump)
6097 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len);
6098 }
6099
6100 void rtw89_fw_c2h_work(struct work_struct *work)
6101 {
6102 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
6103 c2h_work);
6104 struct sk_buff *skb, *tmp;
6105
6106 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) {
6107 skb_unlink(skb, &rtwdev->c2h_queue);
6108 mutex_lock(&rtwdev->mutex);
6109 rtw89_fw_c2h_cmd_handle(rtwdev, skb);
6110 mutex_unlock(&rtwdev->mutex);
6111 dev_kfree_skb_any(skb);
6112 }
6113 }
6114
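/*
 * Register-based H2C/C2H path: the message is written into the h2c_regs
 * window and handed over by setting the trigger bit, while the rolling
 * h2c/c2h counters are updated alongside, presumably so both sides can
 * notice a lost exchange.
 */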
6115 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
6116 struct rtw89_mac_h2c_info *info)
6117 {
6118 const struct rtw89_chip_info *chip = rtwdev->chip;
6119 struct rtw89_fw_info *fw_info = &rtwdev->fw;
6120 const u32 *h2c_reg = chip->h2c_regs;
6121 u8 i, val, len;
6122 int ret;
6123
6124 ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false,
6125 rtwdev, chip->h2c_ctrl_reg);
6126 if (ret) {
6127 rtw89_warn(rtwdev, "FW does not process h2c registers\n");
6128 return ret;
6129 }
6130
6131 len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN,
6132 sizeof(info->u.h2creg[0]));
6133
6134 u32p_replace_bits(&info->u.hdr.w0, info->id, RTW89_H2CREG_HDR_FUNC_MASK);
6135 u32p_replace_bits(&info->u.hdr.w0, len, RTW89_H2CREG_HDR_LEN_MASK);
6136
6137 for (i = 0; i < RTW89_H2CREG_MAX; i++)
6138 rtw89_write32(rtwdev, h2c_reg[i], info->u.h2creg[i]);
6139
6140 fw_info->h2c_counter++;
6141 rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr,
6142 chip->h2c_counter_reg.mask, fw_info->h2c_counter);
6143 rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER);
6144
6145 return 0;
6146 }
6147
6148 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
6149 struct rtw89_mac_c2h_info *info)
6150 {
6151 const struct rtw89_chip_info *chip = rtwdev->chip;
6152 struct rtw89_fw_info *fw_info = &rtwdev->fw;
6153 const u32 *c2h_reg = chip->c2h_regs;
6154 int ret;
6155 u8 i, val;
6156
6157 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL;
6158
6159 ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1,
6160 RTW89_C2H_TIMEOUT, false, rtwdev,
6161 chip->c2h_ctrl_reg);
6162 if (ret) {
6163 rtw89_warn(rtwdev, "c2h reg timeout\n");
6164 return ret;
6165 }
6166
6167 for (i = 0; i < RTW89_C2HREG_MAX; i++)
6168 info->u.c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]);
6169
6170 rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0);
6171
6172 info->id = u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_FUNC_MASK);
6173 info->content_len =
6174 (u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_LEN_MASK) << 2) -
6175 RTW89_C2HREG_HDR_LEN;
6176
6177 fw_info->c2h_counter++;
6178 rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr,
6179 chip->c2h_counter_reg.mask, fw_info->c2h_counter);
6180
6181 return 0;
6182 }
6183
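/*
 * Wrapper around the register path above: optionally write an H2C and/or
 * read back a C2H, depending on which of the two info arguments is
 * non-NULL.
 */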
6184 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
6185 struct rtw89_mac_h2c_info *h2c_info,
6186 struct rtw89_mac_c2h_info *c2h_info)
6187 {
6188 int ret;
6189
6190 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE)
6191 lockdep_assert_held(&rtwdev->mutex);
6192
6193 if (!h2c_info && !c2h_info)
6194 return -EINVAL;
6195
6196 if (!h2c_info)
6197 goto recv_c2h;
6198
6199 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info);
6200 if (ret)
6201 return ret;
6202
6203 recv_c2h:
6204 if (!c2h_info)
6205 return 0;
6206
6207 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info);
6208 if (ret)
6209 return ret;
6210
6211 return 0;
6212 }
6213
6214 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev)
6215 {
6216 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) {
6217 rtw89_err(rtwdev, "[ERR]pwr is off\n");
6218 return;
6219 }
6220
6221 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0));
6222 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1));
6223 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2));
6224 rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3));
6225 rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n",
6226 rtw89_read32(rtwdev, R_AX_HALT_C2H));
6227 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n",
6228 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO));
6229
6230 rtw89_fw_prog_cnt_dump(rtwdev);
6231 }
6232
6233 static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev)
6234 {
6235 struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
6236 struct rtw89_pktofld_info *info, *tmp;
6237 u8 idx;
6238
6239 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) {
6240 if (!(rtwdev->chip->support_bands & BIT(idx)))
6241 continue;
6242
6243 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) {
6244 if (test_bit(info->id, rtwdev->pkt_offload))
6245 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
6246 list_del(&info->list);
6247 kfree(info);
6248 }
6249 }
6250 }
6251
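/*
 * For a 6 GHz scan entry, decide whether the probe request for @ssid_idx is
 * a wildcard (empty SSID). Wildcard entries are flagged so the per-channel
 * packet lists skip them and the scan offload command can reference one as
 * the shared probe template instead.
 */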
6252 static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev,
6253 struct cfg80211_scan_request *req,
6254 struct rtw89_pktofld_info *info,
6255 enum nl80211_band band, u8 ssid_idx)
6256 {
6257 if (band != NL80211_BAND_6GHZ)
6258 return false;
6259
6260 if (req->ssids[ssid_idx].ssid_len) {
6261 memcpy(info->ssid, req->ssids[ssid_idx].ssid,
6262 req->ssids[ssid_idx].ssid_len);
6263 info->ssid_len = req->ssids[ssid_idx].ssid_len;
6264 return false;
6265 } else {
6266 info->wildcard_6ghz = true;
6267 return true;
6268 }
6269 }
6270
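/*
 * For every supported band, append the band-specific and common scan IEs to
 * the bare probe request, register the result as a packet offload entry in
 * firmware and track it on scan_info->pkt_list so it can be released later.
 */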
6271 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
6272 struct rtw89_vif_link *rtwvif_link,
6273 struct sk_buff *skb, u8 ssid_idx)
6274 {
6275 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
6276 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
6277 struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
6278 struct cfg80211_scan_request *req = rtwvif->scan_req;
6279 struct rtw89_pktofld_info *info;
6280 struct sk_buff *new;
6281 int ret = 0;
6282 u8 band;
6283
6284 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
6285 if (!(rtwdev->chip->support_bands & BIT(band)))
6286 continue;
6287
6288 new = skb_copy(skb, GFP_KERNEL);
6289 if (!new) {
6290 ret = -ENOMEM;
6291 goto out;
6292 }
6293 skb_put_data(new, ies->ies[band], ies->len[band]);
6294 skb_put_data(new, ies->common_ies, ies->common_ie_len);
6295
6296 info = kzalloc(sizeof(*info), GFP_KERNEL);
6297 if (!info) {
6298 ret = -ENOMEM;
6299 kfree_skb(new);
6300 goto out;
6301 }
6302
6303 rtw89_is_6ghz_wildcard_probe_req(rtwdev, req, info, band, ssid_idx);
6304
6305 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new);
6306 if (ret) {
6307 kfree_skb(new);
6308 kfree(info);
6309 goto out;
6310 }
6311
6312 list_add_tail(&info->list, &scan_info->pkt_list[band]);
6313 kfree_skb(new);
6314 }
6315 out:
6316 return ret;
6317 }
6318
6319 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev,
6320 struct rtw89_vif_link *rtwvif_link)
6321 {
6322 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
6323 struct cfg80211_scan_request *req = rtwvif->scan_req;
6324 struct sk_buff *skb;
6325 u8 num = req->n_ssids, i;
6326 int ret;
6327
6328 for (i = 0; i < num; i++) {
6329 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr,
6330 req->ssids[i].ssid,
6331 req->ssids[i].ssid_len,
6332 req->ie_len);
6333 if (!skb)
6334 return -ENOMEM;
6335
6336 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif_link, skb, i);
6337 kfree_skb(skb);
6338
6339 if (ret)
6340 return ret;
6341 }
6342
6343 return 0;
6344 }
6345
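/*
 * For 6 GHz channels learned via RNR, build one directed probe request per
 * reported BSSID (addr3 set to that BSSID), offload it to firmware, and
 * give the channel a longer period so the extra probes fit.
 */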
6346 static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev,
6347 struct ieee80211_scan_ies *ies,
6348 struct cfg80211_scan_request *req,
6349 struct rtw89_mac_chinfo *ch_info)
6350 {
6351 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
6352 struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
6353 struct cfg80211_scan_6ghz_params *params;
6354 struct rtw89_pktofld_info *info, *tmp;
6355 struct ieee80211_hdr *hdr;
6356 struct sk_buff *skb;
6357 bool found;
6358 int ret = 0;
6359 u8 i;
6360
6361 if (!req->n_6ghz_params)
6362 return 0;
6363
6364 for (i = 0; i < req->n_6ghz_params; i++) {
6365 params = &req->scan_6ghz_params[i];
6366
6367 if (req->channels[params->channel_idx]->hw_value !=
6368 ch_info->pri_ch)
6369 continue;
6370
6371 found = false;
6372 list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) {
6373 if (ether_addr_equal(tmp->bssid, params->bssid)) {
6374 found = true;
6375 break;
6376 }
6377 }
6378 if (found)
6379 continue;
6380
6381 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr,
6382 NULL, 0, req->ie_len);
6383 if (!skb)
6384 return -ENOMEM;
6385
6386 skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]);
6387 skb_put_data(skb, ies->common_ies, ies->common_ie_len);
6388 hdr = (struct ieee80211_hdr *)skb->data;
6389 ether_addr_copy(hdr->addr3, params->bssid);
6390
6391 info = kzalloc(sizeof(*info), GFP_KERNEL);
6392 if (!info) {
6393 ret = -ENOMEM;
6394 kfree_skb(skb);
6395 goto out;
6396 }
6397
6398 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
6399 if (ret) {
6400 kfree_skb(skb);
6401 kfree(info);
6402 goto out;
6403 }
6404
6405 ether_addr_copy(info->bssid, params->bssid);
6406 info->channel_6ghz = req->channels[params->channel_idx]->hw_value;
6407 list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]);
6408
6409 ch_info->tx_pkt = true;
6410 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G;
6411
6412 kfree_skb(skb);
6413 }
6414
6415 out:
6416 return ret;
6417 }
6418
6419 static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev,
6420 int chan_type, int ssid_num,
6421 struct rtw89_mac_chinfo *ch_info)
6422 {
6423 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
6424 struct rtw89_pktofld_info *info;
6425 u8 probe_count = 0;
6426
6427 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
6428 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
6429 ch_info->bw = RTW89_SCAN_WIDTH;
6430 ch_info->tx_pkt = true;
6431 ch_info->cfg_tx_pwr = false;
6432 ch_info->tx_pwr_idx = 0;
6433 ch_info->tx_null = false;
6434 ch_info->pause_data = false;
6435 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
6436
6437 if (ssid_num) {
6438 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) {
6439 if (info->channel_6ghz &&
6440 ch_info->pri_ch != info->channel_6ghz)
6441 continue;
6442 else if (info->channel_6ghz && probe_count != 0)
6443 ch_info->period += RTW89_CHANNEL_TIME_6G;
6444
6445 if (info->wildcard_6ghz)
6446 continue;
6447
6448 ch_info->pkt_id[probe_count++] = info->id;
6449 if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
6450 break;
6451 }
6452 ch_info->num_pkt = probe_count;
6453 }
6454
6455 switch (chan_type) {
6456 case RTW89_CHAN_DFS:
6457 if (ch_info->ch_band != RTW89_BAND_6G)
6458 ch_info->period = max_t(u8, ch_info->period,
6459 RTW89_DFS_CHAN_TIME);
6460 ch_info->dwell_time = RTW89_DWELL_TIME;
6461 break;
6462 case RTW89_CHAN_ACTIVE:
6463 break;
6464 default:
6465 rtw89_err(rtwdev, "Channel type out of bounds\n");
6466 }
6467 }
6468
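/*
 * Fill one AX channel entry for a regular hw_scan: choose which offloaded
 * probe packets to send, drop probing (and trim the period) on 6 GHz
 * channels that are non-PSC or wildcard-only, and special-case the
 * operating channel (tx_null keep-alive, no probes) as well as DFS channels
 * (longer period, dwell time, data paused).
 */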
6469 static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
6470 int ssid_num,
6471 struct rtw89_mac_chinfo *ch_info)
6472 {
6473 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
6474 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
6475 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
6476 struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
6477 struct cfg80211_scan_request *req = rtwvif->scan_req;
6478 struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
6479 struct rtw89_pktofld_info *info;
6480 u8 band, probe_count = 0;
6481 int ret;
6482
6483 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
6484 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
6485 ch_info->bw = RTW89_SCAN_WIDTH;
6486 ch_info->tx_pkt = true;
6487 ch_info->cfg_tx_pwr = false;
6488 ch_info->tx_pwr_idx = 0;
6489 ch_info->tx_null = false;
6490 ch_info->pause_data = false;
6491 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
6492
6493 if (ch_info->ch_band == RTW89_BAND_6G) {
6494 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) ||
6495 !ch_info->is_psc) {
6496 ch_info->tx_pkt = false;
6497 if (!req->duration_mandatory)
6498 ch_info->period -= RTW89_DWELL_TIME_6G;
6499 }
6500 }
6501
6502 ret = rtw89_update_6ghz_rnr_chan(rtwdev, ies, req, ch_info);
6503 if (ret)
6504 rtw89_warn(rtwdev, "RNR fails: %d\n", ret);
6505
6506 if (ssid_num) {
6507 band = rtw89_hw_to_nl80211_band(ch_info->ch_band);
6508
6509 list_for_each_entry(info, &scan_info->pkt_list[band], list) {
6510 if (info->channel_6ghz &&
6511 ch_info->pri_ch != info->channel_6ghz)
6512 continue;
6513 else if (info->channel_6ghz && probe_count != 0)
6514 ch_info->period += RTW89_CHANNEL_TIME_6G;
6515
6516 if (info->wildcard_6ghz)
6517 continue;
6518
6519 ch_info->pkt_id[probe_count++] = info->id;
6520 if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
6521 break;
6522 }
6523 ch_info->num_pkt = probe_count;
6524 }
6525
6526 switch (chan_type) {
6527 case RTW89_CHAN_OPERATE:
6528 ch_info->central_ch = op->channel;
6529 ch_info->pri_ch = op->primary_channel;
6530 ch_info->ch_band = op->band_type;
6531 ch_info->bw = op->band_width;
6532 ch_info->tx_null = true;
6533 ch_info->num_pkt = 0;
6534 break;
6535 case RTW89_CHAN_DFS:
6536 if (ch_info->ch_band != RTW89_BAND_6G)
6537 ch_info->period = max_t(u8, ch_info->period,
6538 RTW89_DFS_CHAN_TIME);
6539 ch_info->dwell_time = RTW89_DWELL_TIME;
6540 ch_info->pause_data = true;
6541 break;
6542 case RTW89_CHAN_ACTIVE:
6543 ch_info->pause_data = true;
6544 break;
6545 default:
6546 rtw89_err(rtwdev, "Channel type out of bounds\n");
6547 }
6548 }
6549
6550 static void rtw89_pno_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
6551 int ssid_num,
6552 struct rtw89_mac_chinfo_be *ch_info)
6553 {
6554 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
6555 struct rtw89_pktofld_info *info;
6556 u8 probe_count = 0, i;
6557
6558 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
6559 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
6560 ch_info->bw = RTW89_SCAN_WIDTH;
6561 ch_info->tx_null = false;
6562 ch_info->pause_data = false;
6563 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
6564
6565 if (ssid_num) {
6566 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) {
6567 ch_info->pkt_id[probe_count++] = info->id;
6568 if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
6569 break;
6570 }
6571 }
6572
6573 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++)
6574 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE;
6575
6576 switch (chan_type) {
6577 case RTW89_CHAN_DFS:
6578 ch_info->period = max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME);
6579 ch_info->dwell_time = RTW89_DWELL_TIME;
6580 break;
6581 case RTW89_CHAN_ACTIVE:
6582 break;
6583 default:
6584 rtw89_warn(rtwdev, "Channel type out of bounds\n");
6585 break;
6586 }
6587 }
6588
6589 static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
6590 int ssid_num,
6591 struct rtw89_mac_chinfo_be *ch_info)
6592 {
6593 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
6594 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
6595 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
6596 struct cfg80211_scan_request *req = rtwvif->scan_req;
6597 struct rtw89_pktofld_info *info;
6598 u8 band, probe_count = 0, i;
6599
6600 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
6601 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
6602 ch_info->bw = RTW89_SCAN_WIDTH;
6603 ch_info->tx_null = false;
6604 ch_info->pause_data = false;
6605 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
6606
6607 if (ssid_num) {
6608 band = rtw89_hw_to_nl80211_band(ch_info->ch_band);
6609
6610 list_for_each_entry(info, &scan_info->pkt_list[band], list) {
6611 if (info->channel_6ghz &&
6612 ch_info->pri_ch != info->channel_6ghz)
6613 continue;
6614
6615 if (info->wildcard_6ghz)
6616 continue;
6617
6618 ch_info->pkt_id[probe_count++] = info->id;
6619 if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
6620 break;
6621 }
6622 }
6623
6624 if (ch_info->ch_band == RTW89_BAND_6G) {
6625 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) ||
6626 !ch_info->is_psc) {
6627 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
6628 if (!req->duration_mandatory)
6629 ch_info->period -= RTW89_DWELL_TIME_6G;
6630 }
6631 }
6632
6633 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++)
6634 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE;
6635
6636 switch (chan_type) {
6637 case RTW89_CHAN_DFS:
6638 if (ch_info->ch_band != RTW89_BAND_6G)
6639 ch_info->period =
6640 max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME);
6641 ch_info->dwell_time = RTW89_DWELL_TIME;
6642 ch_info->pause_data = true;
6643 break;
6644 case RTW89_CHAN_ACTIVE:
6645 ch_info->pause_data = true;
6646 break;
6647 default:
6648 rtw89_warn(rtwdev, "Channel type out of bounds\n");
6649 break;
6650 }
6651 }
6652
6653 int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
6654 struct rtw89_vif_link *rtwvif_link)
6655 {
6656 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
6657 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
6658 struct rtw89_mac_chinfo *ch_info, *tmp;
6659 struct ieee80211_channel *channel;
6660 struct list_head chan_list;
6661 int list_len;
6662 enum rtw89_chan_type type;
6663 int ret = 0;
6664 u32 idx;
6665
6666 INIT_LIST_HEAD(&chan_list);
6667 for (idx = 0, list_len = 0;
6668 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX;
6669 idx++, list_len++) {
6670 channel = nd_config->channels[idx];
6671 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
6672 if (!ch_info) {
6673 ret = -ENOMEM;
6674 goto out;
6675 }
6676
6677 ch_info->period = RTW89_CHANNEL_TIME;
6678 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
6679 ch_info->central_ch = channel->hw_value;
6680 ch_info->pri_ch = channel->hw_value;
6681 ch_info->is_psc = cfg80211_channel_is_psc(channel);
6682
6683 if (channel->flags &
6684 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
6685 type = RTW89_CHAN_DFS;
6686 else
6687 type = RTW89_CHAN_ACTIVE;
6688
6689 rtw89_pno_scan_add_chan_ax(rtwdev, type, nd_config->n_match_sets, ch_info);
6690 list_add_tail(&ch_info->list, &chan_list);
6691 }
6692 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list);
6693
6694 out:
6695 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
6696 list_del(&ch_info->list);
6697 kfree(ch_info);
6698 }
6699
6700 return ret;
6701 }
6702
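/*
 * Translate the cfg80211 scan request into firmware channel entries, at
 * most RTW89_SCAN_LIST_LIMIT_AX per command. While connected, an extra
 * "go back to the operating channel" entry is inserted whenever the
 * accumulated off-channel time would exceed RTW89_OFF_CHAN_TIME.
 * last_chan_idx remembers how far the request has been consumed so the
 * next call resumes from there.
 */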
6703 int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
6704 struct rtw89_vif_link *rtwvif_link, bool connected)
6705 {
6706 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
6707 struct cfg80211_scan_request *req = rtwvif->scan_req;
6708 struct rtw89_mac_chinfo *ch_info, *tmp;
6709 struct ieee80211_channel *channel;
6710 struct list_head chan_list;
6711 bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
6712 int list_len, off_chan_time = 0;
6713 enum rtw89_chan_type type;
6714 int ret = 0;
6715 u32 idx;
6716
6717 INIT_LIST_HEAD(&chan_list);
6718 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
6719 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX;
6720 idx++, list_len++) {
6721 channel = req->channels[idx];
6722 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
6723 if (!ch_info) {
6724 ret = -ENOMEM;
6725 goto out;
6726 }
6727
6728 if (req->duration)
6729 ch_info->period = req->duration;
6730 else if (channel->band == NL80211_BAND_6GHZ)
6731 ch_info->period = RTW89_CHANNEL_TIME_6G +
6732 RTW89_DWELL_TIME_6G;
6733 else
6734 ch_info->period = RTW89_CHANNEL_TIME;
6735
6736 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
6737 ch_info->central_ch = channel->hw_value;
6738 ch_info->pri_ch = channel->hw_value;
6739 ch_info->rand_seq_num = random_seq;
6740 ch_info->is_psc = cfg80211_channel_is_psc(channel);
6741
6742 if (channel->flags &
6743 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
6744 type = RTW89_CHAN_DFS;
6745 else
6746 type = RTW89_CHAN_ACTIVE;
6747 rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info);
6748
6749 if (connected &&
6750 off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) {
6751 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
6752 if (!tmp) {
6753 ret = -ENOMEM;
6754 kfree(ch_info);
6755 goto out;
6756 }
6757
6758 type = RTW89_CHAN_OPERATE;
6759 tmp->period = req->duration_mandatory ?
6760 req->duration : RTW89_CHANNEL_TIME;
6761 rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp);
6762 list_add_tail(&tmp->list, &chan_list);
6763 off_chan_time = 0;
6764 list_len++;
6765 }
6766 list_add_tail(&ch_info->list, &chan_list);
6767 off_chan_time += ch_info->period;
6768 }
6769 rtwdev->scan_info.last_chan_idx = idx;
6770 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list);
6771
6772 out:
6773 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
6774 list_del(&ch_info->list);
6775 kfree(ch_info);
6776 }
6777
6778 return ret;
6779 }
6780
6781 int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
6782 struct rtw89_vif_link *rtwvif_link)
6783 {
6784 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
6785 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
6786 struct rtw89_mac_chinfo_be *ch_info, *tmp;
6787 struct ieee80211_channel *channel;
6788 struct list_head chan_list;
6789 enum rtw89_chan_type type;
6790 int list_len, ret;
6791 u32 idx;
6792
6793 INIT_LIST_HEAD(&chan_list);
6794
6795 for (idx = 0, list_len = 0;
6796 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE;
6797 idx++, list_len++) {
6798 channel = nd_config->channels[idx];
6799 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
6800 if (!ch_info) {
6801 ret = -ENOMEM;
6802 goto out;
6803 }
6804
6805 ch_info->period = RTW89_CHANNEL_TIME;
6806 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
6807 ch_info->central_ch = channel->hw_value;
6808 ch_info->pri_ch = channel->hw_value;
6809 ch_info->is_psc = cfg80211_channel_is_psc(channel);
6810
6811 if (channel->flags &
6812 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
6813 type = RTW89_CHAN_DFS;
6814 else
6815 type = RTW89_CHAN_ACTIVE;
6816
6817 rtw89_pno_scan_add_chan_be(rtwdev, type,
6818 nd_config->n_match_sets, ch_info);
6819 list_add_tail(&ch_info->list, &chan_list);
6820 }
6821
6822 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list,
6823 rtwvif_link);
6824
6825 out:
6826 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
6827 list_del(&ch_info->list);
6828 kfree(ch_info);
6829 }
6830
6831 return ret;
6832 }
6833
6834 int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
6835 struct rtw89_vif_link *rtwvif_link, bool connected)
6836 {
6837 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
6838 struct cfg80211_scan_request *req = rtwvif->scan_req;
6839 struct rtw89_mac_chinfo_be *ch_info, *tmp;
6840 struct ieee80211_channel *channel;
6841 struct list_head chan_list;
6842 enum rtw89_chan_type type;
6843 int list_len, ret;
6844 bool random_seq;
6845 u32 idx;
6846
6847 random_seq = !!(req->flags & NL80211_SCAN_FLAG_RANDOM_SN);
6848 INIT_LIST_HEAD(&chan_list);
6849
6850 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
6851 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE;
6852 idx++, list_len++) {
6853 channel = req->channels[idx];
6854 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
6855 if (!ch_info) {
6856 ret = -ENOMEM;
6857 goto out;
6858 }
6859
6860 if (req->duration)
6861 ch_info->period = req->duration;
6862 else if (channel->band == NL80211_BAND_6GHZ)
6863 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G;
6864 else
6865 ch_info->period = RTW89_CHANNEL_TIME;
6866
6867 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
6868 ch_info->central_ch = channel->hw_value;
6869 ch_info->pri_ch = channel->hw_value;
6870 ch_info->rand_seq_num = random_seq;
6871 ch_info->is_psc = cfg80211_channel_is_psc(channel);
6872
6873 if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
6874 type = RTW89_CHAN_DFS;
6875 else
6876 type = RTW89_CHAN_ACTIVE;
6877 rtw89_hw_scan_add_chan_be(rtwdev, type, req->n_ssids, ch_info);
6878
6879 list_add_tail(&ch_info->list, &chan_list);
6880 }
6881
6882 rtwdev->scan_info.last_chan_idx = idx;
6883 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list,
6884 rtwvif_link);
6885
6886 out:
6887 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
6888 list_del(&ch_info->list);
6889 kfree(ch_info);
6890 }
6891
6892 return ret;
6893 }
6894
6895 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
6896 struct rtw89_vif_link *rtwvif_link, bool connected)
6897 {
6898 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
6899 int ret;
6900
6901 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif_link);
6902 if (ret) {
6903 rtw89_err(rtwdev, "Update probe request failed\n");
6904 goto out;
6905 }
6906 ret = mac->add_chan_list(rtwdev, rtwvif_link, connected);
6907 out:
6908 return ret;
6909 }
6910
6911 void rtw89_hw_scan_start(struct rtw89_dev *rtwdev,
6912 struct rtw89_vif_link *rtwvif_link,
6913 struct ieee80211_scan_request *scan_req)
6914 {
6915 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
6916 struct cfg80211_scan_request *req = &scan_req->req;
6917 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
6918 rtwvif_link->chanctx_idx);
6919 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
6920 u32 rx_fltr = rtwdev->hal.rx_fltr;
6921 u8 mac_addr[ETH_ALEN];
6922 u32 reg;
6923
6924 /* clone the operating channel and keep it during the scan */
6925 rtwdev->scan_info.op_chan = *chan;
6926
6927 rtwdev->scan_info.scanning_vif = rtwvif_link;
6928 rtwdev->scan_info.last_chan_idx = 0;
6929 rtwdev->scan_info.abort = false;
6930 rtwvif->scan_ies = &scan_req->ies;
6931 rtwvif->scan_req = req;
6932 ieee80211_stop_queues(rtwdev->hw);
6933 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, false);
6934
6935 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
6936 get_random_mask_addr(mac_addr, req->mac_addr,
6937 req->mac_addr_mask);
6938 else
6939 ether_addr_copy(mac_addr, rtwvif_link->mac_addr);
6940 rtw89_core_scan_start(rtwdev, rtwvif_link, mac_addr, true);
6941
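	/* Clear the beacon-check, broadcast and A1-match bits only in this
	 * local copy for the duration of the scan; the original value stays
	 * in rtwdev->hal.rx_fltr and is written back by
	 * rtw89_hw_scan_complete_cb().
	 */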
6942 rx_fltr &= ~B_AX_A_BCN_CHK_EN;
6943 rx_fltr &= ~B_AX_A_BC;
6944 rx_fltr &= ~B_AX_A_A1_MATCH;
6945
6946 reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx);
6947 rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rx_fltr);
6948
6949 rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_HW_SCAN);
6950 }
6951
6952 struct rtw89_hw_scan_complete_cb_data {
6953 struct rtw89_vif_link *rtwvif_link;
6954 bool aborted;
6955 };
6956
6957 static int rtw89_hw_scan_complete_cb(struct rtw89_dev *rtwdev, void *data)
6958 {
6959 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
6960 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
6961 struct rtw89_hw_scan_complete_cb_data *cb_data = data;
6962 struct rtw89_vif_link *rtwvif_link = cb_data->rtwvif_link;
6963 struct cfg80211_scan_info info = {
6964 .aborted = cb_data->aborted,
6965 };
6966 struct rtw89_vif *rtwvif;
6967 u32 reg;
6968
6969 if (!rtwvif_link)
6970 return -EINVAL;
6971
6972 rtwvif = rtwvif_link->rtwvif;
6973
6974 reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx);
6975 rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rtwdev->hal.rx_fltr);
6976
6977 rtw89_core_scan_complete(rtwdev, rtwvif_link, true);
6978 ieee80211_scan_completed(rtwdev->hw, &info);
6979 ieee80211_wake_queues(rtwdev->hw);
6980 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, true);
6981 rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true);
6982
6983 rtw89_release_pkt_list(rtwdev);
6984 rtwvif->scan_req = NULL;
6985 rtwvif->scan_ies = NULL;
6986 scan_info->last_chan_idx = 0;
6987 scan_info->scanning_vif = NULL;
6988 scan_info->abort = false;
6989
6990 return 0;
6991 }
6992
6993 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev,
6994 struct rtw89_vif_link *rtwvif_link,
6995 bool aborted)
6996 {
6997 struct rtw89_hw_scan_complete_cb_data cb_data = {
6998 .rtwvif_link = rtwvif_link,
6999 .aborted = aborted,
7000 };
7001 const struct rtw89_chanctx_cb_parm cb_parm = {
7002 .cb = rtw89_hw_scan_complete_cb,
7003 .data = &cb_data,
7004 .caller = __func__,
7005 };
7006
7007 /* The things here need to be done after setting the channel (for coex)
7008 * and before proceeding with entity mode (for MCC). So, pass them as a
7009 * callback to ensure the right sequence rather than doing them directly.
7010 */
7011 rtw89_chanctx_proceed(rtwdev, &cb_parm);
7012 }
7013
7014 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev,
7015 struct rtw89_vif_link *rtwvif_link)
7016 {
7017 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
7018 int ret;
7019
7020 scan_info->abort = true;
7021
7022 ret = rtw89_hw_scan_offload(rtwdev, rtwvif_link, false);
7023 if (ret)
7024 rtw89_warn(rtwdev, "rtw89_hw_scan_offload failed ret %d\n", ret);
7025
7026 /* Indicate ieee80211_scan_completed() before returning, which is safe
7027 * because the scan abort command always waits for completion of
7028 * RTW89_SCAN_END_SCAN_NOTIFY, so that ieee80211_stop() can flush the
7029 * scan work properly.
7030 */
7031 rtw89_hw_scan_complete(rtwdev, rtwvif_link, true);
7032 }
7033
7034 static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev)
7035 {
7036 struct rtw89_vif_link *rtwvif_link;
7037 struct rtw89_vif *rtwvif;
7038 unsigned int link_id;
7039
7040 rtw89_for_each_rtwvif(rtwdev, rtwvif) {
7041 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
7042 /* A non-zero bssid implies connected or attempting to connect */
7043 if (!is_zero_ether_addr(rtwvif_link->bssid))
7044 return true;
7045 }
7046 }
7047
7048 return false;
7049 }
7050
7051 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev,
7052 struct rtw89_vif_link *rtwvif_link,
7053 bool enable)
7054 {
7055 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
7056 struct rtw89_scan_option opt = {0};
7057 bool connected;
7058 int ret = 0;
7059
7060 if (!rtwvif_link)
7061 return -EINVAL;
7062
7063 connected = rtw89_is_any_vif_connected_or_connecting(rtwdev);
7064 opt.enable = enable;
7065 opt.target_ch_mode = connected;
7066 if (enable) {
7067 ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif_link, connected);
7068 if (ret)
7069 goto out;
7070 }
7071
7072 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
7073 opt.operation = enable ? RTW89_SCAN_OP_START : RTW89_SCAN_OP_STOP;
7074 opt.scan_mode = RTW89_SCAN_MODE_SA;
7075 opt.band = rtwvif_link->mac_idx;
7076 opt.num_macc_role = 0;
7077 opt.mlo_mode = rtwdev->mlo_dbcc_mode;
7078 opt.num_opch = connected ? 1 : 0;
7079 opt.opch_end = connected ? 0 : RTW89_CHAN_INVALID;
7080 }
7081
7082 ret = mac->scan_offload(rtwdev, &opt, rtwvif_link, false);
7083 out:
7084 return ret;
7085 }
7086
7087 #define H2C_FW_CPU_EXCEPTION_LEN 4
7088 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566
7089 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev)
7090 {
7091 struct sk_buff *skb;
7092 int ret;
7093
7094 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN);
7095 if (!skb) {
7096 rtw89_err(rtwdev,
7097 "failed to alloc skb for fw cpu exception\n");
7098 return -ENOMEM;
7099 }
7100
7101 skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN);
7102 RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data,
7103 H2C_FW_CPU_EXCEPTION_TYPE_DEF);
7104
7105 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7106 H2C_CAT_TEST,
7107 H2C_CL_FW_STATUS_TEST,
7108 H2C_FUNC_CPU_EXCEPTION, 0, 0,
7109 H2C_FW_CPU_EXCEPTION_LEN);
7110
7111 ret = rtw89_h2c_tx(rtwdev, skb, false);
7112 if (ret) {
7113 rtw89_err(rtwdev, "failed to send h2c\n");
7114 goto fail;
7115 }
7116
7117 return 0;
7118
7119 fail:
7120 dev_kfree_skb_any(skb);
7121 return ret;
7122 }
7123
7124 #define H2C_PKT_DROP_LEN 24
7125 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
7126 const struct rtw89_pkt_drop_params *params)
7127 {
7128 struct sk_buff *skb;
7129 int ret;
7130
7131 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN);
7132 if (!skb) {
7133 rtw89_err(rtwdev,
7134 "failed to alloc skb for packet drop\n");
7135 return -ENOMEM;
7136 }
7137
7138 switch (params->sel) {
7139 case RTW89_PKT_DROP_SEL_MACID_BE_ONCE:
7140 case RTW89_PKT_DROP_SEL_MACID_BK_ONCE:
7141 case RTW89_PKT_DROP_SEL_MACID_VI_ONCE:
7142 case RTW89_PKT_DROP_SEL_MACID_VO_ONCE:
7143 case RTW89_PKT_DROP_SEL_BAND_ONCE:
7144 break;
7145 default:
7146 rtw89_debug(rtwdev, RTW89_DBG_FW,
7147 "H2C of pkt drop might not fully support sel: %d yet\n",
7148 params->sel);
7149 break;
7150 }
7151
7152 skb_put(skb, H2C_PKT_DROP_LEN);
7153 RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel);
7154 RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid);
7155 RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band);
7156 RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port);
7157 RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid);
7158 RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs);
7159 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data,
7160 params->macid_band_sel[0]);
7161 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data,
7162 params->macid_band_sel[1]);
7163 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data,
7164 params->macid_band_sel[2]);
7165 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data,
7166 params->macid_band_sel[3]);
7167
7168 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7169 H2C_CAT_MAC,
7170 H2C_CL_MAC_FW_OFLD,
7171 H2C_FUNC_PKT_DROP, 0, 0,
7172 H2C_PKT_DROP_LEN);
7173
7174 ret = rtw89_h2c_tx(rtwdev, skb, false);
7175 if (ret) {
7176 rtw89_err(rtwdev, "failed to send h2c\n");
7177 goto fail;
7178 }
7179
7180 return 0;
7181
7182 fail:
7183 dev_kfree_skb_any(skb);
7184 return ret;
7185 }
7186
7187 #define H2C_KEEP_ALIVE_LEN 4
7188 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
7189 bool enable)
7190 {
7191 struct sk_buff *skb;
7192 u8 pkt_id = 0;
7193 int ret;
7194
7195 if (enable) {
7196 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
7197 RTW89_PKT_OFLD_TYPE_NULL_DATA,
7198 &pkt_id);
7199 if (ret)
7200 return -EPERM;
7201 }
7202
7203 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN);
7204 if (!skb) {
7205 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n");
7206 return -ENOMEM;
7207 }
7208
7209 skb_put(skb, H2C_KEEP_ALIVE_LEN);
7210
7211 RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable);
7212 RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id);
7213 RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5);
7214 RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif_link->mac_id);
7215
7216 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7217 H2C_CAT_MAC,
7218 H2C_CL_MAC_WOW,
7219 H2C_FUNC_KEEP_ALIVE, 0, 1,
7220 H2C_KEEP_ALIVE_LEN);
7221
7222 ret = rtw89_h2c_tx(rtwdev, skb, false);
7223 if (ret) {
7224 rtw89_err(rtwdev, "failed to send h2c\n");
7225 goto fail;
7226 }
7227
7228 return 0;
7229
7230 fail:
7231 dev_kfree_skb_any(skb);
7232
7233 return ret;
7234 }
7235
7236 int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
7237 bool enable)
7238 {
7239 struct rtw89_h2c_arp_offload *h2c;
7240 u32 len = sizeof(*h2c);
7241 struct sk_buff *skb;
7242 u8 pkt_id = 0;
7243 int ret;
7244
7245 if (enable) {
7246 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
7247 RTW89_PKT_OFLD_TYPE_ARP_RSP,
7248 &pkt_id);
7249 if (ret)
7250 return ret;
7251 }
7252
7253 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
7254 if (!skb) {
7255 rtw89_err(rtwdev, "failed to alloc skb for arp offload\n");
7256 return -ENOMEM;
7257 }
7258
7259 skb_put(skb, len);
7260 h2c = (struct rtw89_h2c_arp_offload *)skb->data;
7261
7262 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_ARP_OFFLOAD_W0_ENABLE) |
7263 le32_encode_bits(0, RTW89_H2C_ARP_OFFLOAD_W0_ACTION) |
7264 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_ARP_OFFLOAD_W0_MACID) |
7265 le32_encode_bits(pkt_id, RTW89_H2C_ARP_OFFLOAD_W0_PKT_ID);
7266
7267 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7268 H2C_CAT_MAC,
7269 H2C_CL_MAC_WOW,
7270 H2C_FUNC_ARP_OFLD, 0, 1,
7271 len);
7272
7273 ret = rtw89_h2c_tx(rtwdev, skb, false);
7274 if (ret) {
7275 rtw89_err(rtwdev, "failed to send h2c\n");
7276 goto fail;
7277 }
7278
7279 return 0;
7280
7281 fail:
7282 dev_kfree_skb_any(skb);
7283
7284 return ret;
7285 }
7286
7287 #define H2C_DISCONNECT_DETECT_LEN 8
7288 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev,
7289 struct rtw89_vif_link *rtwvif_link, bool enable)
7290 {
7291 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
7292 struct sk_buff *skb;
7293 u8 macid = rtwvif_link->mac_id;
7294 int ret;
7295
7296 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN);
7297 if (!skb) {
7298 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n");
7299 return -ENOMEM;
7300 }
7301
7302 skb_put(skb, H2C_DISCONNECT_DETECT_LEN);
7303
7304 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
7305 RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable);
7306 RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable);
7307 RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid);
7308 RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100);
7309 RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5);
7310 }
7311
7312 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7313 H2C_CAT_MAC,
7314 H2C_CL_MAC_WOW,
7315 H2C_FUNC_DISCONNECT_DETECT, 0, 1,
7316 H2C_DISCONNECT_DETECT_LEN);
7317
7318 ret = rtw89_h2c_tx(rtwdev, skb, false);
7319 if (ret) {
7320 rtw89_err(rtwdev, "failed to send h2c\n");
7321 goto fail;
7322 }
7323
7324 return 0;
7325
7326 fail:
7327 dev_kfree_skb_any(skb);
7328
7329 return ret;
7330 }
7331
7332 int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
7333 bool enable)
7334 {
7335 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
7336 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
7337 struct rtw89_h2c_cfg_nlo *h2c;
7338 u32 len = sizeof(*h2c);
7339 struct sk_buff *skb;
7340 int ret, i;
7341
7342 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
7343 if (!skb) {
7344 rtw89_err(rtwdev, "failed to alloc skb for nlo\n");
7345 return -ENOMEM;
7346 }
7347
7348 skb_put(skb, len);
7349 h2c = (struct rtw89_h2c_cfg_nlo *)skb->data;
7350
7351 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_NLO_W0_ENABLE) |
7352 le32_encode_bits(enable, RTW89_H2C_NLO_W0_IGNORE_CIPHER) |
7353 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_NLO_W0_MACID);
7354
7355 if (enable) {
7356 h2c->nlo_cnt = nd_config->n_match_sets;
7357 for (i = 0; i < nd_config->n_match_sets; i++) {
7358 h2c->ssid_len[i] = nd_config->match_sets[i].ssid.ssid_len;
7359 memcpy(h2c->ssid[i], nd_config->match_sets[i].ssid.ssid,
7360 nd_config->match_sets[i].ssid.ssid_len);
7361 }
7362 }
7363
7364 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7365 H2C_CAT_MAC,
7366 H2C_CL_MAC_WOW,
7367 H2C_FUNC_NLO, 0, 1,
7368 len);
7369
7370 ret = rtw89_h2c_tx(rtwdev, skb, false);
7371 if (ret) {
7372 rtw89_err(rtwdev, "failed to send h2c\n");
7373 goto fail;
7374 }
7375
7376 return 0;
7377
7378 fail:
7379 dev_kfree_skb_any(skb);
7380 return ret;
7381 }
7382
7383 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
7384 bool enable)
7385 {
7386 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
7387 struct rtw89_h2c_wow_global *h2c;
7388 u8 macid = rtwvif_link->mac_id;
7389 u32 len = sizeof(*h2c);
7390 struct sk_buff *skb;
7391 int ret;
7392
7393 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
7394 if (!skb) {
7395 rtw89_err(rtwdev, "failed to alloc skb for wow global\n");
7396 return -ENOMEM;
7397 }
7398
7399 skb_put(skb, len);
7400 h2c = (struct rtw89_h2c_wow_global *)skb->data;
7401
7402 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GLOBAL_W0_ENABLE) |
7403 le32_encode_bits(macid, RTW89_H2C_WOW_GLOBAL_W0_MAC_ID) |
7404 le32_encode_bits(rtw_wow->ptk_alg,
7405 RTW89_H2C_WOW_GLOBAL_W0_PAIRWISE_SEC_ALGO) |
7406 le32_encode_bits(rtw_wow->gtk_alg,
7407 RTW89_H2C_WOW_GLOBAL_W0_GROUP_SEC_ALGO);
7408 h2c->key_info = rtw_wow->key_info;
7409
7410 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7411 H2C_CAT_MAC,
7412 H2C_CL_MAC_WOW,
7413 H2C_FUNC_WOW_GLOBAL, 0, 1,
7414 len);
7415
7416 ret = rtw89_h2c_tx(rtwdev, skb, false);
7417 if (ret) {
7418 rtw89_err(rtwdev, "failed to send h2c\n");
7419 goto fail;
7420 }
7421
7422 return 0;
7423
7424 fail:
7425 dev_kfree_skb_any(skb);
7426
7427 return ret;
7428 }
7429
7430 #define H2C_WAKEUP_CTRL_LEN 4
7431 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
7432 struct rtw89_vif_link *rtwvif_link,
7433 bool enable)
7434 {
7435 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
7436 struct sk_buff *skb;
7437 u8 macid = rtwvif_link->mac_id;
7438 int ret;
7439
7440 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN);
7441 if (!skb) {
7442 rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n");
7443 return -ENOMEM;
7444 }
7445
7446 skb_put(skb, H2C_WAKEUP_CTRL_LEN);
7447
7448 if (rtw_wow->pattern_cnt)
7449 RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable);
7450 if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
7451 RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable);
7452 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
7453 RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable);
7454
7455 RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid);
7456
7457 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7458 H2C_CAT_MAC,
7459 H2C_CL_MAC_WOW,
7460 H2C_FUNC_WAKEUP_CTRL, 0, 1,
7461 H2C_WAKEUP_CTRL_LEN);
7462
7463 ret = rtw89_h2c_tx(rtwdev, skb, false);
7464 if (ret) {
7465 rtw89_err(rtwdev, "failed to send h2c\n");
7466 goto fail;
7467 }
7468
7469 return 0;
7470
7471 fail:
7472 dev_kfree_skb_any(skb);
7473
7474 return ret;
7475 }
7476
7477 #define H2C_WOW_CAM_UPD_LEN 24
7478 int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev,
7479 struct rtw89_wow_cam_info *cam_info)
7480 {
7481 struct sk_buff *skb;
7482 int ret;
7483
7484 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN);
7485 if (!skb) {
7486 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n");
7487 return -ENOMEM;
7488 }
7489
7490 skb_put(skb, H2C_WOW_CAM_UPD_LEN);
7491
7492 RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w);
7493 RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx);
7494 if (cam_info->valid) {
7495 RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]);
7496 RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]);
7497 RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]);
7498 RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]);
7499 RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc);
7500 RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data,
7501 cam_info->negative_pattern_match);
7502 RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data,
7503 cam_info->skip_mac_hdr);
7504 RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc);
7505 RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc);
7506 RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc);
7507 }
7508 RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid);
7509
7510 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7511 H2C_CAT_MAC,
7512 H2C_CL_MAC_WOW,
7513 H2C_FUNC_WOW_CAM_UPD, 0, 1,
7514 H2C_WOW_CAM_UPD_LEN);
7515
7516 ret = rtw89_h2c_tx(rtwdev, skb, false);
7517 if (ret) {
7518 rtw89_err(rtwdev, "failed to send h2c\n");
7519 goto fail;
7520 }
7521
7522 return 0;
7523 fail:
7524 dev_kfree_skb_any(skb);
7525
7526 return ret;
7527 }
7528
7529 int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev,
7530 struct rtw89_vif_link *rtwvif_link,
7531 bool enable)
7532 {
7533 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
7534 struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info;
7535 struct rtw89_h2c_wow_gtk_ofld *h2c;
7536 u8 macid = rtwvif_link->mac_id;
7537 u32 len = sizeof(*h2c);
7538 u8 pkt_id_sa_query = 0;
7539 struct sk_buff *skb;
7540 u8 pkt_id_eapol = 0;
7541 int ret;
7542
7543 if (!rtw_wow->gtk_alg)
7544 return 0;
7545
7546 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
7547 if (!skb) {
7548 rtw89_err(rtwdev, "failed to alloc skb for gtk ofld\n");
7549 return -ENOMEM;
7550 }
7551
7552 skb_put(skb, len);
7553 h2c = (struct rtw89_h2c_wow_gtk_ofld *)skb->data;
7554
7555 if (!enable)
7556 goto hdr;
7557
7558 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
7559 RTW89_PKT_OFLD_TYPE_EAPOL_KEY,
7560 &pkt_id_eapol);
7561 if (ret)
7562 goto fail;
7563
7564 if (gtk_info->igtk_keyid) {
7565 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
7566 RTW89_PKT_OFLD_TYPE_SA_QUERY,
7567 &pkt_id_sa_query);
7568 if (ret)
7569 goto fail;
7570 }
7571
7572 /* TKIP is not supported yet */
7573 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GTK_OFLD_W0_EN) |
7574 le32_encode_bits(0, RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) |
7575 le32_encode_bits(gtk_info->igtk_keyid ? 1 : 0,
7576 RTW89_H2C_WOW_GTK_OFLD_W0_IEEE80211W_EN) |
7577 le32_encode_bits(macid, RTW89_H2C_WOW_GTK_OFLD_W0_MAC_ID) |
7578 le32_encode_bits(pkt_id_eapol, RTW89_H2C_WOW_GTK_OFLD_W0_GTK_RSP_ID);
7579 h2c->w1 = le32_encode_bits(gtk_info->igtk_keyid ? pkt_id_sa_query : 0,
7580 RTW89_H2C_WOW_GTK_OFLD_W1_PMF_SA_QUERY_ID) |
7581 le32_encode_bits(rtw_wow->akm, RTW89_H2C_WOW_GTK_OFLD_W1_ALGO_AKM_SUIT);
7582 h2c->gtk_info = rtw_wow->gtk_info;
7583
7584 hdr:
7585 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7586 H2C_CAT_MAC,
7587 H2C_CL_MAC_WOW,
7588 H2C_FUNC_GTK_OFLD, 0, 1,
7589 len);
7590
7591 ret = rtw89_h2c_tx(rtwdev, skb, false);
7592 if (ret) {
7593 rtw89_err(rtwdev, "failed to send h2c\n");
7594 goto fail;
7595 }
7596 return 0;
7597 fail:
7598 dev_kfree_skb_any(skb);
7599
7600 return ret;
7601 }
7602
7603 int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
7604 bool enable)
7605 {
7606 struct rtw89_wait_info *wait = &rtwdev->mac.ps_wait;
7607 struct rtw89_h2c_fwips *h2c;
7608 u32 len = sizeof(*h2c);
7609 struct sk_buff *skb;
7610
7611 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
7612 if (!skb) {
7613 rtw89_err(rtwdev, "failed to alloc skb for fw ips\n");
7614 return -ENOMEM;
7615 }
7616 skb_put(skb, len);
7617 h2c = (struct rtw89_h2c_fwips *)skb->data;
7618
7619 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_FW_IPS_W0_MACID) |
7620 le32_encode_bits(enable, RTW89_H2C_FW_IPS_W0_ENABLE);
7621
7622 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7623 H2C_CAT_MAC,
7624 H2C_CL_MAC_PS,
7625 H2C_FUNC_IPS_CFG, 0, 1,
7626 len);
7627
7628 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_PS_WAIT_COND_IPS_CFG);
7629 }
7630
7631 int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev)
7632 {
7633 struct rtw89_wait_info *wait = &rtwdev->wow.wait;
7634 struct rtw89_h2c_wow_aoac *h2c;
7635 u32 len = sizeof(*h2c);
7636 struct sk_buff *skb;
7637
7638 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
7639 if (!skb) {
7640 rtw89_err(rtwdev, "failed to alloc skb for aoac\n");
7641 return -ENOMEM;
7642 }
7643
7644 skb_put(skb, len);
7645
7646 /* This H2C only notifies the firmware to generate an AOAC report C2H;
7647 * no parameters are needed.
7648 */
7649 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7650 H2C_CAT_MAC,
7651 H2C_CL_MAC_WOW,
7652 H2C_FUNC_AOAC_REPORT_REQ, 1, 0,
7653 len);
7654
7655 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_WOW_WAIT_COND_AOAC);
7656 }
7657
7658 /* Return < 0 if a failure happens while waiting for the condition.
7659 * Return 0 when waiting for the condition succeeds.
7660 * Return > 0 if the wait is considered unreachable due to driver/FW design,
7661 * where 1 means during SER.
7662 */
7663 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
7664 struct rtw89_wait_info *wait, unsigned int cond)
7665 {
7666 int ret;
7667
7668 ret = rtw89_h2c_tx(rtwdev, skb, false);
7669 if (ret) {
7670 rtw89_err(rtwdev, "failed to send h2c\n");
7671 dev_kfree_skb_any(skb);
7672 return -EBUSY;
7673 }
7674
7675 if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
7676 return 1;
7677
7678 return rtw89_wait_for_cond(wait, cond);
7679 }
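
/* A minimal sketch of how a caller might interpret the tri-state return
 * value documented above (hypothetical; existing callers often just
 * propagate it):
 *
 *	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
 *	if (ret < 0)
 *		;	// H2C TX failed or waiting for the condition failed
 *	else if (ret == 1)
 *		;	// wait skipped because SER handling is in progress
 *	else
 *		;	// ret == 0: firmware completed the condition
 */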
7680
7681 #define H2C_ADD_MCC_LEN 16
7682 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev,
7683 const struct rtw89_fw_mcc_add_req *p)
7684 {
7685 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
7686 struct sk_buff *skb;
7687 unsigned int cond;
7688
7689 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN);
7690 if (!skb) {
7691 rtw89_err(rtwdev,
7692 "failed to alloc skb for add mcc\n");
7693 return -ENOMEM;
7694 }
7695
7696 skb_put(skb, H2C_ADD_MCC_LEN);
7697 RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid);
7698 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0);
7699 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1);
7700 RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch);
7701 RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth);
7702 RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group);
7703 RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt);
7704 RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null);
7705 RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry);
7706 RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch);
7707 RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count);
7708 RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early);
7709 RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g);
7710 RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en);
7711 RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass);
7712 RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type);
7713 RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration);
7714 RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en);
7715 RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num);
7716 RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target);
7717
7718 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7719 H2C_CAT_MAC,
7720 H2C_CL_MCC,
7721 H2C_FUNC_ADD_MCC, 0, 0,
7722 H2C_ADD_MCC_LEN);
7723
7724 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC);
7725 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
7726 }
7727
7728 #define H2C_START_MCC_LEN 12
7729 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev,
7730 const struct rtw89_fw_mcc_start_req *p)
7731 {
7732 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
7733 struct sk_buff *skb;
7734 unsigned int cond;
7735
7736 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN);
7737 if (!skb) {
7738 rtw89_err(rtwdev,
7739 "failed to alloc skb for start mcc\n");
7740 return -ENOMEM;
7741 }
7742
7743 skb_put(skb, H2C_START_MCC_LEN);
7744 RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group);
7745 RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group);
7746 RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action);
7747 RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group);
7748 RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt);
7749 RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en);
7750 RTW89_SET_FWCMD_START_MCC_MACID(skb->data, p->macid);
7751 RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low);
7752 RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high);
7753
7754 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7755 H2C_CAT_MAC,
7756 H2C_CL_MCC,
7757 H2C_FUNC_START_MCC, 0, 0,
7758 H2C_START_MCC_LEN);
7759
7760 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC);
7761 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
7762 }
7763
7764 #define H2C_STOP_MCC_LEN 4
7765 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid,
7766 bool prev_groups)
7767 {
7768 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
7769 struct sk_buff *skb;
7770 unsigned int cond;
7771
7772 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN);
7773 if (!skb) {
7774 rtw89_err(rtwdev,
7775 "failed to alloc skb for stop mcc\n");
7776 return -ENOMEM;
7777 }
7778
7779 skb_put(skb, H2C_STOP_MCC_LEN);
7780 RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid);
7781 RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group);
7782 RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups);
7783
7784 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7785 H2C_CAT_MAC,
7786 H2C_CL_MCC,
7787 H2C_FUNC_STOP_MCC, 0, 0,
7788 H2C_STOP_MCC_LEN);
7789
7790 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC);
7791 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
7792 }
7793
7794 #define H2C_DEL_MCC_GROUP_LEN 4
7795 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group,
7796 bool prev_groups)
7797 {
7798 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
7799 struct sk_buff *skb;
7800 unsigned int cond;
7801
7802 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN);
7803 if (!skb) {
7804 rtw89_err(rtwdev,
7805 "failed to alloc skb for del mcc group\n");
7806 return -ENOMEM;
7807 }
7808
7809 skb_put(skb, H2C_DEL_MCC_GROUP_LEN);
7810 RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group);
7811 RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups);
7812
7813 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7814 H2C_CAT_MAC,
7815 H2C_CL_MCC,
7816 H2C_FUNC_DEL_MCC_GROUP, 0, 0,
7817 H2C_DEL_MCC_GROUP_LEN);
7818
7819 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP);
7820 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
7821 }
7822
7823 #define H2C_RESET_MCC_GROUP_LEN 4
7824 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group)
7825 {
7826 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
7827 struct sk_buff *skb;
7828 unsigned int cond;
7829
7830 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN);
7831 if (!skb) {
7832 rtw89_err(rtwdev,
7833 "failed to alloc skb for reset mcc group\n");
7834 return -ENOMEM;
7835 }
7836
7837 skb_put(skb, H2C_RESET_MCC_GROUP_LEN);
7838 RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group);
7839
7840 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7841 H2C_CAT_MAC,
7842 H2C_CL_MCC,
7843 H2C_FUNC_RESET_MCC_GROUP, 0, 0,
7844 H2C_RESET_MCC_GROUP_LEN);
7845
7846 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP);
7847 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
7848 }
7849
7850 #define H2C_MCC_REQ_TSF_LEN 4
7851 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev,
7852 const struct rtw89_fw_mcc_tsf_req *req,
7853 struct rtw89_mac_mcc_tsf_rpt *rpt)
7854 {
7855 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
7856 struct rtw89_mac_mcc_tsf_rpt *tmp;
7857 struct sk_buff *skb;
7858 unsigned int cond;
7859 int ret;
7860
7861 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_REQ_TSF_LEN);
7862 if (!skb) {
7863 rtw89_err(rtwdev,
7864 "failed to alloc skb for mcc req tsf\n");
7865 return -ENOMEM;
7866 }
7867
7868 skb_put(skb, H2C_MCC_REQ_TSF_LEN);
7869 RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group);
7870 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x);
7871 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y);
7872
7873 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7874 H2C_CAT_MAC,
7875 H2C_CL_MCC,
7876 H2C_FUNC_MCC_REQ_TSF, 0, 0,
7877 H2C_MCC_REQ_TSF_LEN);
7878
7879 cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF);
7880 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
7881 if (ret)
7882 return ret;
7883
7884 tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf;
7885 *rpt = *tmp;
7886
7887 return 0;
7888 }
7889
7890 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4
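/* This H2C is variable length: a 4-byte descriptor followed by the macid
 * bitmap itself. The BUILD_BUG_ON below guarantees RTW89_MAX_MAC_ID_NUM is a
 * multiple of 8 so the bitmap packs into whole bytes. As an illustration
 * only (the actual constant is defined elsewhere), if RTW89_MAX_MAC_ID_NUM
 * were 128, map_len would be 16 and h2c_len 20.
 */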
7891 int rtw89_fw_h2c_mcc_macid_bitmap(struct rtw89_dev *rtwdev, u8 group, u8 macid,
7892 u8 *bitmap)
7893 {
7894 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
7895 struct sk_buff *skb;
7896 unsigned int cond;
7897 u8 map_len;
7898 u8 h2c_len;
7899
7900 BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8);
7901 map_len = RTW89_MAX_MAC_ID_NUM / 8;
7902 h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len;
7903 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len);
7904 if (!skb) {
7905 rtw89_err(rtwdev,
7906 "failed to alloc skb for mcc macid bitmap\n");
7907 return -ENOMEM;
7908 }
7909
7910 skb_put(skb, h2c_len);
7911 RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group);
7912 RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid);
7913 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len);
7914 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len);
7915
7916 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7917 H2C_CAT_MAC,
7918 H2C_CL_MCC,
7919 H2C_FUNC_MCC_MACID_BITMAP, 0, 0,
7920 h2c_len);
7921
7922 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP);
7923 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
7924 }
7925
7926 #define H2C_MCC_SYNC_LEN 4
7927 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source,
7928 u8 target, u8 offset)
7929 {
7930 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
7931 struct sk_buff *skb;
7932 unsigned int cond;
7933
7934 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN);
7935 if (!skb) {
7936 rtw89_err(rtwdev,
7937 "failed to alloc skb for mcc sync\n");
7938 return -ENOMEM;
7939 }
7940
7941 skb_put(skb, H2C_MCC_SYNC_LEN);
7942 RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group);
7943 RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source);
7944 RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target);
7945 RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset);
7946
7947 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7948 H2C_CAT_MAC,
7949 H2C_CL_MCC,
7950 H2C_FUNC_MCC_SYNC, 0, 0,
7951 H2C_MCC_SYNC_LEN);
7952
7953 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC);
7954 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
7955 }
7956
7957 #define H2C_MCC_SET_DURATION_LEN 20
7958 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev,
7959 const struct rtw89_fw_mcc_duration *p)
7960 {
7961 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
7962 struct sk_buff *skb;
7963 unsigned int cond;
7964
7965 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN);
7966 if (!skb) {
7967 rtw89_err(rtwdev,
7968 "failed to alloc skb for mcc set duration\n");
7969 return -ENOMEM;
7970 }
7971
7972 skb_put(skb, H2C_MCC_SET_DURATION_LEN);
7973 RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group);
7974 RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group);
7975 RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid);
7976 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x);
7977 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y);
7978 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data,
7979 p->start_tsf_low);
7980 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data,
7981 p->start_tsf_high);
7982 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x);
7983 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y);
7984
7985 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7986 H2C_CAT_MAC,
7987 H2C_CL_MCC,
7988 H2C_FUNC_MCC_SET_DURATION, 0, 0,
7989 H2C_MCC_SET_DURATION_LEN);
7990
7991 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION);
7992 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
7993 }
7994
7995 static
7996 u32 rtw89_fw_h2c_mrc_add_slot(struct rtw89_dev *rtwdev,
7997 const struct rtw89_fw_mrc_add_slot_arg *slot_arg,
7998 struct rtw89_h2c_mrc_add_slot *slot_h2c)
7999 {
8000 bool fill_h2c = !!slot_h2c;
8001 unsigned int i;
8002
8003 if (!fill_h2c)
8004 goto calc_len;
8005
8006 slot_h2c->w0 = le32_encode_bits(slot_arg->duration,
8007 RTW89_H2C_MRC_ADD_SLOT_W0_DURATION) |
8008 le32_encode_bits(slot_arg->courtesy_en,
8009 RTW89_H2C_MRC_ADD_SLOT_W0_COURTESY_EN) |
8010 le32_encode_bits(slot_arg->role_num,
8011 RTW89_H2C_MRC_ADD_SLOT_W0_ROLE_NUM);
8012 slot_h2c->w1 = le32_encode_bits(slot_arg->courtesy_period,
8013 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_PERIOD) |
8014 le32_encode_bits(slot_arg->courtesy_target,
8015 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_TARGET);
8016
8017 for (i = 0; i < slot_arg->role_num; i++) {
8018 slot_h2c->roles[i].w0 =
8019 le32_encode_bits(slot_arg->roles[i].macid,
8020 RTW89_H2C_MRC_ADD_ROLE_W0_MACID) |
8021 le32_encode_bits(slot_arg->roles[i].role_type,
8022 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_TYPE) |
8023 le32_encode_bits(slot_arg->roles[i].is_master,
8024 RTW89_H2C_MRC_ADD_ROLE_W0_IS_MASTER) |
8025 le32_encode_bits(slot_arg->roles[i].en_tx_null,
8026 RTW89_H2C_MRC_ADD_ROLE_W0_TX_NULL_EN) |
8027 le32_encode_bits(false,
8028 RTW89_H2C_MRC_ADD_ROLE_W0_IS_ALT_ROLE) |
8029 le32_encode_bits(false,
8030 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_ALT_EN);
8031 slot_h2c->roles[i].w1 =
8032 le32_encode_bits(slot_arg->roles[i].central_ch,
8033 RTW89_H2C_MRC_ADD_ROLE_W1_CENTRAL_CH_SEG) |
8034 le32_encode_bits(slot_arg->roles[i].primary_ch,
8035 RTW89_H2C_MRC_ADD_ROLE_W1_PRI_CH) |
8036 le32_encode_bits(slot_arg->roles[i].bw,
8037 RTW89_H2C_MRC_ADD_ROLE_W1_BW) |
8038 le32_encode_bits(slot_arg->roles[i].band,
8039 RTW89_H2C_MRC_ADD_ROLE_W1_CH_BAND_TYPE) |
8040 le32_encode_bits(slot_arg->roles[i].null_early,
8041 RTW89_H2C_MRC_ADD_ROLE_W1_NULL_EARLY) |
8042 le32_encode_bits(false,
8043 RTW89_H2C_MRC_ADD_ROLE_W1_RFK_BY_PASS) |
8044 le32_encode_bits(true,
8045 RTW89_H2C_MRC_ADD_ROLE_W1_CAN_BTC);
8046 slot_h2c->roles[i].macid_main_bitmap =
8047 cpu_to_le32(slot_arg->roles[i].macid_main_bitmap);
8048 slot_h2c->roles[i].macid_paired_bitmap =
8049 cpu_to_le32(slot_arg->roles[i].macid_paired_bitmap);
8050 }
8051
8052 calc_len:
8053 return struct_size(slot_h2c, roles, slot_arg->role_num);
8054 }
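
/* rtw89_fw_h2c_mrc_add_slot() is used in two passes: with a NULL @slot_h2c
 * it only returns the encoded size of the slot (struct_size() over
 * role_num); with a buffer it also fills the slot in. rtw89_fw_h2c_mrc_add()
 * below relies on this to size the skb first and then encode each slot.
 * A condensed sketch of that pattern:
 *
 *	len = sizeof(*h2c_head);
 *	for (i = 0; i < arg->slot_num; i++)
 *		len += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], NULL);
 *	// ... allocate an skb of @len and write the head ...
 *	for (i = 0; i < arg->slot_num; i++)
 *		tmp += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], tmp);
 */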
8055
8056 int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev,
8057 const struct rtw89_fw_mrc_add_arg *arg)
8058 {
8059 struct rtw89_h2c_mrc_add *h2c_head;
8060 struct sk_buff *skb;
8061 unsigned int i;
8062 void *tmp;
8063 u32 len;
8064 int ret;
8065
8066 len = sizeof(*h2c_head);
8067 for (i = 0; i < arg->slot_num; i++)
8068 len += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], NULL);
8069
8070 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8071 if (!skb) {
8072 rtw89_err(rtwdev, "failed to alloc skb for mrc add\n");
8073 return -ENOMEM;
8074 }
8075
8076 skb_put(skb, len);
8077 tmp = skb->data;
8078
8079 h2c_head = tmp;
8080 h2c_head->w0 = le32_encode_bits(arg->sch_idx,
8081 RTW89_H2C_MRC_ADD_W0_SCH_IDX) |
8082 le32_encode_bits(arg->sch_type,
8083 RTW89_H2C_MRC_ADD_W0_SCH_TYPE) |
8084 le32_encode_bits(arg->slot_num,
8085 RTW89_H2C_MRC_ADD_W0_SLOT_NUM) |
8086 le32_encode_bits(arg->btc_in_sch,
8087 RTW89_H2C_MRC_ADD_W0_BTC_IN_SCH);
8088
8089 tmp += sizeof(*h2c_head);
8090 for (i = 0; i < arg->slot_num; i++)
8091 tmp += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], tmp);
8092
8093 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8094 H2C_CAT_MAC,
8095 H2C_CL_MRC,
8096 H2C_FUNC_ADD_MRC, 0, 0,
8097 len);
8098
8099 ret = rtw89_h2c_tx(rtwdev, skb, false);
8100 if (ret) {
8101 rtw89_err(rtwdev, "failed to send h2c\n");
8102 dev_kfree_skb_any(skb);
8103 return -EBUSY;
8104 }
8105
8106 return 0;
8107 }
8108
8109 int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev,
8110 const struct rtw89_fw_mrc_start_arg *arg)
8111 {
8112 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8113 struct rtw89_h2c_mrc_start *h2c;
8114 u32 len = sizeof(*h2c);
8115 struct sk_buff *skb;
8116 unsigned int cond;
8117
8118 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8119 if (!skb) {
8120 rtw89_err(rtwdev, "failed to alloc skb for mrc start\n");
8121 return -ENOMEM;
8122 }
8123
8124 skb_put(skb, len);
8125 h2c = (struct rtw89_h2c_mrc_start *)skb->data;
8126
8127 h2c->w0 = le32_encode_bits(arg->sch_idx,
8128 RTW89_H2C_MRC_START_W0_SCH_IDX) |
8129 le32_encode_bits(arg->old_sch_idx,
8130 RTW89_H2C_MRC_START_W0_OLD_SCH_IDX) |
8131 le32_encode_bits(arg->action,
8132 RTW89_H2C_MRC_START_W0_ACTION);
8133
8134 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32);
8135 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf);
8136
8137 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8138 H2C_CAT_MAC,
8139 H2C_CL_MRC,
8140 H2C_FUNC_START_MRC, 0, 0,
8141 len);
8142
8143 cond = RTW89_MRC_WAIT_COND(arg->sch_idx, H2C_FUNC_START_MRC);
8144 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8145 }
8146
8147 int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx, u8 slot_idx)
8148 {
8149 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8150 struct rtw89_h2c_mrc_del *h2c;
8151 u32 len = sizeof(*h2c);
8152 struct sk_buff *skb;
8153 unsigned int cond;
8154
8155 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8156 if (!skb) {
8157 rtw89_err(rtwdev, "failed to alloc skb for mrc del\n");
8158 return -ENOMEM;
8159 }
8160
8161 skb_put(skb, len);
8162 h2c = (struct rtw89_h2c_mrc_del *)skb->data;
8163
8164 h2c->w0 = le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX) |
8165 le32_encode_bits(slot_idx, RTW89_H2C_MRC_DEL_W0_STOP_SLOT_IDX);
8166
8167 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8168 H2C_CAT_MAC,
8169 H2C_CL_MRC,
8170 H2C_FUNC_DEL_MRC, 0, 0,
8171 len);
8172
8173 cond = RTW89_MRC_WAIT_COND(sch_idx, H2C_FUNC_DEL_MRC);
8174 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8175 }
8176
8177 int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev,
8178 const struct rtw89_fw_mrc_req_tsf_arg *arg,
8179 struct rtw89_mac_mrc_tsf_rpt *rpt)
8180 {
8181 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8182 struct rtw89_h2c_mrc_req_tsf *h2c;
8183 struct rtw89_mac_mrc_tsf_rpt *tmp;
8184 struct sk_buff *skb;
8185 unsigned int i;
8186 u32 len;
8187 int ret;
8188
8189 len = struct_size(h2c, infos, arg->num);
8190 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8191 if (!skb) {
8192 rtw89_err(rtwdev, "failed to alloc skb for mrc req tsf\n");
8193 return -ENOMEM;
8194 }
8195
8196 skb_put(skb, len);
8197 h2c = (struct rtw89_h2c_mrc_req_tsf *)skb->data;
8198
8199 h2c->req_tsf_num = arg->num;
8200 for (i = 0; i < arg->num; i++)
8201 h2c->infos[i] =
8202 u8_encode_bits(arg->infos[i].band,
8203 RTW89_H2C_MRC_REQ_TSF_INFO_BAND) |
8204 u8_encode_bits(arg->infos[i].port,
8205 RTW89_H2C_MRC_REQ_TSF_INFO_PORT);
8206
8207 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8208 H2C_CAT_MAC,
8209 H2C_CL_MRC,
8210 H2C_FUNC_MRC_REQ_TSF, 0, 0,
8211 len);
8212
8213 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_MRC_WAIT_COND_REQ_TSF);
8214 if (ret)
8215 return ret;
8216
8217 tmp = (struct rtw89_mac_mrc_tsf_rpt *)wait->data.buf;
8218 *rpt = *tmp;
8219
8220 return 0;
8221 }
8222
8223 int rtw89_fw_h2c_mrc_upd_bitmap(struct rtw89_dev *rtwdev,
8224 const struct rtw89_fw_mrc_upd_bitmap_arg *arg)
8225 {
8226 struct rtw89_h2c_mrc_upd_bitmap *h2c;
8227 u32 len = sizeof(*h2c);
8228 struct sk_buff *skb;
8229 int ret;
8230
8231 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8232 if (!skb) {
8233 rtw89_err(rtwdev, "failed to alloc skb for mrc upd bitmap\n");
8234 return -ENOMEM;
8235 }
8236
8237 skb_put(skb, len);
8238 h2c = (struct rtw89_h2c_mrc_upd_bitmap *)skb->data;
8239
8240 h2c->w0 = le32_encode_bits(arg->sch_idx,
8241 RTW89_H2C_MRC_UPD_BITMAP_W0_SCH_IDX) |
8242 le32_encode_bits(arg->action,
8243 RTW89_H2C_MRC_UPD_BITMAP_W0_ACTION) |
8244 le32_encode_bits(arg->macid,
8245 RTW89_H2C_MRC_UPD_BITMAP_W0_MACID);
8246 h2c->w1 = le32_encode_bits(arg->client_macid,
8247 RTW89_H2C_MRC_UPD_BITMAP_W1_CLIENT_MACID);
8248
8249 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8250 H2C_CAT_MAC,
8251 H2C_CL_MRC,
8252 H2C_FUNC_MRC_UPD_BITMAP, 0, 0,
8253 len);
8254
8255 ret = rtw89_h2c_tx(rtwdev, skb, false);
8256 if (ret) {
8257 rtw89_err(rtwdev, "failed to send h2c\n");
8258 dev_kfree_skb_any(skb);
8259 return -EBUSY;
8260 }
8261
8262 return 0;
8263 }
8264
8265 int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev,
8266 const struct rtw89_fw_mrc_sync_arg *arg)
8267 {
8268 struct rtw89_h2c_mrc_sync *h2c;
8269 u32 len = sizeof(*h2c);
8270 struct sk_buff *skb;
8271 int ret;
8272
8273 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8274 if (!skb) {
8275 rtw89_err(rtwdev, "failed to alloc skb for mrc sync\n");
8276 return -ENOMEM;
8277 }
8278
8279 skb_put(skb, len);
8280 h2c = (struct rtw89_h2c_mrc_sync *)skb->data;
8281
8282 h2c->w0 = le32_encode_bits(true, RTW89_H2C_MRC_SYNC_W0_SYNC_EN) |
8283 le32_encode_bits(arg->src.port,
8284 RTW89_H2C_MRC_SYNC_W0_SRC_PORT) |
8285 le32_encode_bits(arg->src.band,
8286 RTW89_H2C_MRC_SYNC_W0_SRC_BAND) |
8287 le32_encode_bits(arg->dest.port,
8288 RTW89_H2C_MRC_SYNC_W0_DEST_PORT) |
8289 le32_encode_bits(arg->dest.band,
8290 RTW89_H2C_MRC_SYNC_W0_DEST_BAND);
8291 h2c->w1 = le32_encode_bits(arg->offset, RTW89_H2C_MRC_SYNC_W1_OFFSET);
8292
8293 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8294 H2C_CAT_MAC,
8295 H2C_CL_MRC,
8296 H2C_FUNC_MRC_SYNC, 0, 0,
8297 len);
8298
8299 ret = rtw89_h2c_tx(rtwdev, skb, false);
8300 if (ret) {
8301 rtw89_err(rtwdev, "failed to send h2c\n");
8302 dev_kfree_skb_any(skb);
8303 return -EBUSY;
8304 }
8305
8306 return 0;
8307 }
8308
8309 int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev,
8310 const struct rtw89_fw_mrc_upd_duration_arg *arg)
8311 {
8312 struct rtw89_h2c_mrc_upd_duration *h2c;
8313 struct sk_buff *skb;
8314 unsigned int i;
8315 u32 len;
8316 int ret;
8317
8318 len = struct_size(h2c, slots, arg->slot_num);
8319 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8320 if (!skb) {
8321 rtw89_err(rtwdev, "failed to alloc skb for mrc upd duration\n");
8322 return -ENOMEM;
8323 }
8324
8325 skb_put(skb, len);
8326 h2c = (struct rtw89_h2c_mrc_upd_duration *)skb->data;
8327
8328 h2c->w0 = le32_encode_bits(arg->sch_idx,
8329 RTW89_H2C_MRC_UPD_DURATION_W0_SCH_IDX) |
8330 le32_encode_bits(arg->slot_num,
8331 RTW89_H2C_MRC_UPD_DURATION_W0_SLOT_NUM) |
8332 le32_encode_bits(false,
8333 RTW89_H2C_MRC_UPD_DURATION_W0_BTC_IN_SCH);
8334
8335 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32);
8336 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf);
8337
8338 for (i = 0; i < arg->slot_num; i++) {
8339 h2c->slots[i] =
8340 le32_encode_bits(arg->slots[i].slot_idx,
8341 RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX) |
8342 le32_encode_bits(arg->slots[i].duration,
8343 RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION);
8344 }
8345
8346 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8347 H2C_CAT_MAC,
8348 H2C_CL_MRC,
8349 H2C_FUNC_MRC_UPD_DURATION, 0, 0,
8350 len);
8351
8352 ret = rtw89_h2c_tx(rtwdev, skb, false);
8353 if (ret) {
8354 rtw89_err(rtwdev, "failed to send h2c\n");
8355 dev_kfree_skb_any(skb);
8356 return -EBUSY;
8357 }
8358
8359 return 0;
8360 }
8361
8362 static int rtw89_fw_h2c_ap_info(struct rtw89_dev *rtwdev, bool en)
8363 {
8364 struct rtw89_h2c_ap_info *h2c;
8365 u32 len = sizeof(*h2c);
8366 struct sk_buff *skb;
8367 int ret;
8368
8369 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8370 if (!skb) {
8371 rtw89_err(rtwdev, "failed to alloc skb for ap info\n");
8372 return -ENOMEM;
8373 }
8374
8375 skb_put(skb, len);
8376 h2c = (struct rtw89_h2c_ap_info *)skb->data;
8377
8378 h2c->w0 = le32_encode_bits(en, RTW89_H2C_AP_INFO_W0_PWR_INT_EN);
8379
8380 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8381 H2C_CAT_MAC,
8382 H2C_CL_AP,
8383 H2C_FUNC_AP_INFO, 0, 0,
8384 len);
8385
8386 ret = rtw89_h2c_tx(rtwdev, skb, false);
8387 if (ret) {
8388 rtw89_err(rtwdev, "failed to send h2c\n");
8389 dev_kfree_skb_any(skb);
8390 return -EBUSY;
8391 }
8392
8393 return 0;
8394 }
8395
8396 int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en)
8397 {
8398 int ret;
8399
8400 if (en) {
8401 if (refcount_inc_not_zero(&rtwdev->refcount_ap_info))
8402 return 0;
8403 } else {
8404 if (!refcount_dec_and_test(&rtwdev->refcount_ap_info))
8405 return 0;
8406 }
8407
8408 ret = rtw89_fw_h2c_ap_info(rtwdev, en);
8409 if (ret) {
8410 if (!test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
8411 return ret;
8412
8413 /* During recovery, neither the driver nor the stack has full error
8414 * handling, so show a warning, but return 0 with the refcount
8415 * increased normally. This avoids an underflow when it is later
8416 * called with @en == false.
8417 */
8418 rtw89_warn(rtwdev, "h2c ap_info failed during SER\n");
8419 }
8420
8421 if (en)
8422 refcount_set(&rtwdev->refcount_ap_info, 1);
8423
8424 return 0;
8425 }
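
/* A minimal usage sketch for the refcounted helper above (hypothetical call
 * sites; the real ones live elsewhere in the driver):
 *
 *	// refcount 0 -> 1: the enable H2C is actually sent
 *	rtw89_fw_h2c_ap_info_refcount(rtwdev, true);
 *	// refcount 1 -> 2: no H2C is sent
 *	rtw89_fw_h2c_ap_info_refcount(rtwdev, true);
 *	// teardown must be symmetric; only the last disable sends the H2C
 *	rtw89_fw_h2c_ap_info_refcount(rtwdev, false);
 *	rtw89_fw_h2c_ap_info_refcount(rtwdev, false);
 */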
8426
8427 static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len)
8428 {
8429 static const u8 zeros[U8_MAX] = {};
8430
8431 return memcmp(ext_ptr, zeros, ext_len) == 0;
8432 }
8433
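/* Compatibility check for TX power entries loaded from a firmware file: an
 * entry is accepted either when the driver's struct is at least as large as
 * the entry size declared by the file (ent_sz), or when every byte beyond
 * the driver's struct is zero, i.e. a newer file only appended fields this
 * driver does not use yet. For example, if sizeof(*e) is 8 and ent_sz is 12,
 * the entry is accepted only when the trailing 4 bytes at cursor + 8 are all
 * zero (sizes here are illustrative only).
 */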
8434 #define __fw_txpwr_entry_acceptable(e, cursor, ent_sz) \
8435 ({ \
8436 u8 __var_sz = sizeof(*(e)); \
8437 bool __accept; \
8438 if (__var_sz >= (ent_sz)) \
8439 __accept = true; \
8440 else \
8441 __accept = __fw_txpwr_entry_zero_ext((cursor) + __var_sz,\
8442 (ent_sz) - __var_sz);\
8443 __accept; \
8444 })
8445
8446 static bool
8447 fw_txpwr_byrate_entry_valid(const struct rtw89_fw_txpwr_byrate_entry *e,
8448 const void *cursor,
8449 const struct rtw89_txpwr_conf *conf)
8450 {
8451 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
8452 return false;
8453
8454 if (e->band >= RTW89_BAND_NUM || e->bw >= RTW89_BYR_BW_NUM)
8455 return false;
8456
8457 switch (e->rs) {
8458 case RTW89_RS_CCK:
8459 if (e->shf + e->len > RTW89_RATE_CCK_NUM)
8460 return false;
8461 break;
8462 case RTW89_RS_OFDM:
8463 if (e->shf + e->len > RTW89_RATE_OFDM_NUM)
8464 return false;
8465 break;
8466 case RTW89_RS_MCS:
8467 if (e->shf + e->len > __RTW89_RATE_MCS_NUM ||
8468 e->nss >= RTW89_NSS_NUM ||
8469 e->ofdma >= RTW89_OFDMA_NUM)
8470 return false;
8471 break;
8472 case RTW89_RS_HEDCM:
8473 if (e->shf + e->len > RTW89_RATE_HEDCM_NUM ||
8474 e->nss >= RTW89_NSS_HEDCM_NUM ||
8475 e->ofdma >= RTW89_OFDMA_NUM)
8476 return false;
8477 break;
8478 case RTW89_RS_OFFSET:
8479 if (e->shf + e->len > __RTW89_RATE_OFFSET_NUM)
8480 return false;
8481 break;
8482 default:
8483 return false;
8484 }
8485
8486 return true;
8487 }
8488
8489 static
8490 void rtw89_fw_load_txpwr_byrate(struct rtw89_dev *rtwdev,
8491 const struct rtw89_txpwr_table *tbl)
8492 {
8493 const struct rtw89_txpwr_conf *conf = tbl->data;
8494 struct rtw89_fw_txpwr_byrate_entry entry = {};
8495 struct rtw89_txpwr_byrate *byr_head;
8496 struct rtw89_rate_desc desc = {};
8497 const void *cursor;
8498 u32 data;
8499 s8 *byr;
8500 int i;
8501
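	/* Each entry packs its byrate values into the 32-bit @data word, one
	 * per byte starting from the least significant byte: byte i applies
	 * to rate index entry.shf + i for i = 0..entry.len - 1. E.g. with
	 * shf = 4, len = 4 and data = 0x44434241, indices 4..7 receive 0x41,
	 * 0x42, 0x43 and 0x44 (values are illustrative only).
	 */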
8502 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
8503 if (!fw_txpwr_byrate_entry_valid(&entry, cursor, conf))
8504 continue;
8505
8506 byr_head = &rtwdev->byr[entry.band][entry.bw];
8507 data = le32_to_cpu(entry.data);
8508 desc.ofdma = entry.ofdma;
8509 desc.nss = entry.nss;
8510 desc.rs = entry.rs;
8511
8512 for (i = 0; i < entry.len; i++, data >>= 8) {
8513 desc.idx = entry.shf + i;
8514 byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc);
8515 *byr = data & 0xff;
8516 }
8517 }
8518 }
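
/* How the per-entry unpacking above works, with illustrative values (not
 * taken from any real firmware file): an entry with shf == 4, len == 3 and
 * entry.data == cpu_to_le32(0x00141210) fills three consecutive by-rate
 * power values, one byte each, starting at rate index 4:
 *
 *	idx 4 <- 0x10, idx 5 <- 0x12, idx 6 <- 0x14
 *
 * i.e. the least significant byte of the little-endian word maps to the
 * lowest rate index, and each 8-bit shift advances one index.
 */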

static bool
fw_txpwr_lmt_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_2ghz_entry *e,
			      const void *cursor,
			      const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->bw >= RTW89_2G_BW_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->rs >= RTW89_RS_LMT_NUM)
		return false;
	if (e->bf >= RTW89_BF_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->ch_idx >= RTW89_2G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_2ghz(struct rtw89_txpwr_lmt_2ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_2ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_2ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
		       [entry.ch_idx] = entry.v;
	}
}

static bool
fw_txpwr_lmt_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_5ghz_entry *e,
			      const void *cursor,
			      const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->bw >= RTW89_5G_BW_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->rs >= RTW89_RS_LMT_NUM)
		return false;
	if (e->bf >= RTW89_BF_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->ch_idx >= RTW89_5G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_5ghz(struct rtw89_txpwr_lmt_5ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_5ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_5ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
		       [entry.ch_idx] = entry.v;
	}
}

static bool
fw_txpwr_lmt_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_6ghz_entry *e,
			      const void *cursor,
			      const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->bw >= RTW89_6G_BW_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->rs >= RTW89_RS_LMT_NUM)
		return false;
	if (e->bf >= RTW89_BF_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER)
		return false;
	if (e->ch_idx >= RTW89_6G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_6ghz(struct rtw89_txpwr_lmt_6ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_6ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_6ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
		       [entry.reg_6ghz_power][entry.ch_idx] = entry.v;
	}
}
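
/* The 2/5/6 GHz limit loaders above share one shape: each firmware entry
 * names a single cell of a dense limit table, and the loader writes entry.v
 * at [bw][nt][rs][bf][regd][...ch_idx]. The 6 GHz variant adds one extra
 * dimension, reg_6ghz_power (bounded by NUM_OF_RTW89_REG_6GHZ_POWER), so the
 * same channel can carry distinct limits per 6 GHz power category. Entries
 * that fail validation are skipped silently, which lets an older driver
 * ignore table cells introduced by newer firmware.
 */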

static bool
fw_txpwr_lmt_ru_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_2ghz_entry *e,
				 const void *cursor,
				 const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->ru >= RTW89_RU_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->ch_idx >= RTW89_2G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_ru_2ghz(struct rtw89_txpwr_lmt_ru_2ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_ru_2ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_ru_2ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v;
	}
}

static bool
fw_txpwr_lmt_ru_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_5ghz_entry *e,
				 const void *cursor,
				 const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->ru >= RTW89_RU_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->ch_idx >= RTW89_5G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_ru_5ghz(struct rtw89_txpwr_lmt_ru_5ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_ru_5ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_ru_5ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v;
	}
}

static bool
fw_txpwr_lmt_ru_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_6ghz_entry *e,
				 const void *cursor,
				 const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->ru >= RTW89_RU_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER)
		return false;
	if (e->ch_idx >= RTW89_6G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_ru_6ghz(struct rtw89_txpwr_lmt_ru_6ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_ru_6ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_ru_6ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.ru][entry.nt][entry.regd][entry.reg_6ghz_power]
		       [entry.ch_idx] = entry.v;
	}
}

static bool
fw_tx_shape_lmt_entry_valid(const struct rtw89_fw_tx_shape_lmt_entry *e,
			    const void *cursor,
			    const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->band >= RTW89_BAND_NUM)
		return false;
	if (e->tx_shape_rs >= RTW89_RS_TX_SHAPE_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_tx_shape_lmt(struct rtw89_tx_shape_lmt_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_tx_shape_lmt_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_tx_shape_lmt_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.band][entry.tx_shape_rs][entry.regd] = entry.v;
	}
}

static bool
fw_tx_shape_lmt_ru_entry_valid(const struct rtw89_fw_tx_shape_lmt_ru_entry *e,
			       const void *cursor,
			       const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->band >= RTW89_BAND_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_tx_shape_lmt_ru(struct rtw89_tx_shape_lmt_ru_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_tx_shape_lmt_ru_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_tx_shape_lmt_ru_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.band][entry.regd] = entry.v;
	}
}

const struct rtw89_rfe_parms *
rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev,
			    const struct rtw89_rfe_parms *init)
{
	struct rtw89_rfe_data *rfe_data = rtwdev->rfe_data;
	struct rtw89_rfe_parms *parms;

	if (!rfe_data)
		return init;

	parms = &rfe_data->rfe_parms;
	if (init)
		*parms = *init;

	if (rtw89_txpwr_conf_valid(&rfe_data->byrate.conf)) {
		rfe_data->byrate.tbl.data = &rfe_data->byrate.conf;
		rfe_data->byrate.tbl.size = 0; /* don't care here */
		rfe_data->byrate.tbl.load = rtw89_fw_load_txpwr_byrate;
		parms->byr_tbl = &rfe_data->byrate.tbl;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_2ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->lmt_2ghz);
		parms->rule_2ghz.lmt = &rfe_data->lmt_2ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_5ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->lmt_5ghz);
		parms->rule_5ghz.lmt = &rfe_data->lmt_5ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_6ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->lmt_6ghz);
		parms->rule_6ghz.lmt = &rfe_data->lmt_6ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_2ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->lmt_ru_2ghz);
		parms->rule_2ghz.lmt_ru = &rfe_data->lmt_ru_2ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_5ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->lmt_ru_5ghz);
		parms->rule_5ghz.lmt_ru = &rfe_data->lmt_ru_5ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_6ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->lmt_ru_6ghz);
		parms->rule_6ghz.lmt_ru = &rfe_data->lmt_ru_6ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt.conf)) {
		rtw89_fw_load_tx_shape_lmt(&rfe_data->tx_shape_lmt);
		parms->tx_shape.lmt = &rfe_data->tx_shape_lmt.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt_ru.conf)) {
		rtw89_fw_load_tx_shape_lmt_ru(&rfe_data->tx_shape_lmt_ru);
		parms->tx_shape.lmt_ru = &rfe_data->tx_shape_lmt_ru.v;
	}

	return parms;
}

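/* A minimal sketch of how the lookup above is typically consumed; the caller
 * and the field names outside this function (rtwdev->rfe_parms,
 * chip->dflt_parms) are assumptions for illustration, not guaranteed by this
 * file. A chip setup path keeps a built-in default rtw89_rfe_parms and lets
 * firmware-provided tables override individual rules when present:
 *
 *	rtwdev->rfe_parms = rtw89_load_rfe_data_from_fw(rtwdev,
 *							rtwdev->chip->dflt_parms);
 *
 * Since the function falls back to @init when no RFE data was parsed from
 * the firmware file, the returned pointer can be used unconditionally.
 */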