1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2019 Marvell.
5  *
6  */
7 
8 #ifdef CONFIG_DEBUG_FS
9 
10 #include <linux/fs.h>
11 #include <linux/debugfs.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 
15 #include "rvu_struct.h"
16 #include "rvu_reg.h"
17 #include "rvu.h"
18 #include "cgx.h"
19 #include "lmac_common.h"
20 #include "npc.h"
21 #include "rvu_npc_hash.h"
22 #include "mcs.h"
23 
24 #define DEBUGFS_DIR_NAME "octeontx2"
25 
/* Generic statistic indices; the same CGX_STATn labels index both the
 * CGX RX and CGX TX counter-name tables below.
 */
enum {
	CGX_STAT0,
	CGX_STAT1,
	CGX_STAT2,
	CGX_STAT3,
	CGX_STAT4,
	CGX_STAT5,
	CGX_STAT6,
	CGX_STAT7,
	CGX_STAT8,
	CGX_STAT9,
	CGX_STAT10,
	CGX_STAT11,
	CGX_STAT12,
	CGX_STAT13,
	CGX_STAT14,
	CGX_STAT15,
	CGX_STAT16,
	CGX_STAT17,
	CGX_STAT18,
};
47 
/* Human-readable names for the CGX RX counters, indexed by CGX_STATn */
static char *cgx_rx_stats_fields[] = {
	[CGX_STAT0]	= "Received packets",
	[CGX_STAT1]	= "Octets of received packets",
	[CGX_STAT2]	= "Received PAUSE packets",
	[CGX_STAT3]	= "Received PAUSE and control packets",
	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
	[CGX_STAT8]	= "Error packets",
	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
	[CGX_STAT11]	= "NCSI-bound packets dropped",
	[CGX_STAT12]	= "NCSI-bound octets dropped",
};
63 
/* Human-readable names for the CGX TX counters, indexed by CGX_STATn */
static char *cgx_tx_stats_fields[] = {
	[CGX_STAT0]	= "Packets dropped due to excessive collisions",
	[CGX_STAT1]	= "Packets dropped due to excessive deferral",
	[CGX_STAT2]	= "Multiple collisions before successful transmission",
	[CGX_STAT3]	= "Single collisions before successful transmission",
	[CGX_STAT4]	= "Total octets sent on the interface",
	[CGX_STAT5]	= "Total frames sent on the interface",
	[CGX_STAT6]	= "Packets sent with an octet count < 64",
	[CGX_STAT7]	= "Packets sent with an octet count == 64",
	[CGX_STAT8]	= "Packets sent with an octet count of 65-127",
	[CGX_STAT9]	= "Packets sent with an octet count of 128-255",
	[CGX_STAT10]	= "Packets sent with an octet count of 256-511",
	[CGX_STAT11]	= "Packets sent with an octet count of 512-1023",
	[CGX_STAT12]	= "Packets sent with an octet count of 1024-1518",
	[CGX_STAT13]	= "Packets sent with an octet count of > 1518",
	[CGX_STAT14]	= "Packets sent to a broadcast DMAC",
	[CGX_STAT15]	= "Packets sent to the multicast DMAC",
	[CGX_STAT16]	= "Transmit underflow and were truncated",
	[CGX_STAT17]	= "Control/PAUSE packets sent",
};
84 
85 static char *rpm_rx_stats_fields[] = {
86 	"Octets of received packets",
87 	"Octets of received packets with out error",
88 	"Received packets with alignment errors",
89 	"Control/PAUSE packets received",
90 	"Packets received with Frame too long Errors",
91 	"Packets received with a1nrange length Errors",
92 	"Received packets",
93 	"Packets received with FrameCheckSequenceErrors",
94 	"Packets received with VLAN header",
95 	"Error packets",
96 	"Packets received with unicast DMAC",
97 	"Packets received with multicast DMAC",
98 	"Packets received with broadcast DMAC",
99 	"Dropped packets",
100 	"Total frames received on interface",
101 	"Packets received with an octet count < 64",
102 	"Packets received with an octet count == 64",
103 	"Packets received with an octet count of 65-127",
104 	"Packets received with an octet count of 128-255",
105 	"Packets received with an octet count of 256-511",
106 	"Packets received with an octet count of 512-1023",
107 	"Packets received with an octet count of 1024-1518",
108 	"Packets received with an octet count of > 1518",
109 	"Oversized Packets",
110 	"Jabber Packets",
111 	"Fragmented Packets",
112 	"CBFC(class based flow control) pause frames received for class 0",
113 	"CBFC pause frames received for class 1",
114 	"CBFC pause frames received for class 2",
115 	"CBFC pause frames received for class 3",
116 	"CBFC pause frames received for class 4",
117 	"CBFC pause frames received for class 5",
118 	"CBFC pause frames received for class 6",
119 	"CBFC pause frames received for class 7",
120 	"CBFC pause frames received for class 8",
121 	"CBFC pause frames received for class 9",
122 	"CBFC pause frames received for class 10",
123 	"CBFC pause frames received for class 11",
124 	"CBFC pause frames received for class 12",
125 	"CBFC pause frames received for class 13",
126 	"CBFC pause frames received for class 14",
127 	"CBFC pause frames received for class 15",
128 	"MAC control packets received",
129 };
130 
/* Human-readable names for the RPM TX MAC counters.
 * NOTE(review): entries are positional — order presumably mirrors the
 * hardware counter order used by the RPM stats dump code; confirm before
 * reordering.
 */
static char *rpm_tx_stats_fields[] = {
	"Total octets sent on the interface",
	"Total octets transmitted OK",
	"Control/Pause frames sent",
	"Total frames transmitted OK",
	"Total frames sent with VLAN header",
	"Error Packets",
	"Packets sent to unicast DMAC",
	"Packets sent to the multicast DMAC",
	"Packets sent to a broadcast DMAC",
	"Packets sent with an octet count == 64",
	"Packets sent with an octet count of 65-127",
	"Packets sent with an octet count of 128-255",
	"Packets sent with an octet count of 256-511",
	"Packets sent with an octet count of 512-1023",
	"Packets sent with an octet count of 1024-1518",
	"Packets sent with an octet count of > 1518",
	"CBFC(class based flow control) pause frames transmitted for class 0",
	"CBFC pause frames transmitted for class 1",
	"CBFC pause frames transmitted for class 2",
	"CBFC pause frames transmitted for class 3",
	"CBFC pause frames transmitted for class 4",
	"CBFC pause frames transmitted for class 5",
	"CBFC pause frames transmitted for class 6",
	"CBFC pause frames transmitted for class 7",
	"CBFC pause frames transmitted for class 8",
	"CBFC pause frames transmitted for class 9",
	"CBFC pause frames transmitted for class 10",
	"CBFC pause frames transmitted for class 11",
	"CBFC pause frames transmitted for class 12",
	"CBFC pause frames transmitted for class 13",
	"CBFC pause frames transmitted for class 14",
	"CBFC pause frames transmitted for class 15",
	"MAC control packets sent",
	"Total frames sent on the interface"
};
167 
/* CPT engine types; hardware encoding starts at 1 (AE/SE/IE per the
 * CPT engine-group convention — asymmetric/symmetric/IPsec, presumably).
 */
enum cpt_eng_type {
	CPT_AE_TYPE = 1,
	CPT_SE_TYPE = 2,
	CPT_IE_TYPE = 3,
};
173 
/* Placeholders so RVU_DEBUG_SEQ_FOPS(name, read, NULL) expands to a valid
 * initializer when a debugfs file has no write (or read) handler.
 */
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL

/* Generate seq_file based file_operations for a debugfs entry: defines
 * rvu_dbg_open_<name>() wrapping single_open() with rvu_dbg_<read_op> as
 * the show callback, plus the rvu_dbg_<name>_fops table.
 */
#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)	\
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner		= THIS_MODULE, \
	.open		= rvu_dbg_open_##name, \
	.read		= seq_read, \
	.write		= rvu_dbg_##write_op, \
	.llseek		= seq_lseek, \
	.release	= single_release, \
}

/* Generate raw (non seq_file) file_operations using simple_open() */
#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = simple_open, \
	.read = rvu_dbg_##read_op, \
	.write = rvu_dbg_##write_op \
}

/* Forward declaration; the NIX variant is defined later in this file */
static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
200 
rvu_dbg_mcs_port_stats_display(struct seq_file * filp,void * unused,int dir)201 static int rvu_dbg_mcs_port_stats_display(struct seq_file *filp, void *unused, int dir)
202 {
203 	struct mcs *mcs = filp->private;
204 	struct mcs_port_stats stats;
205 	int lmac;
206 
207 	seq_puts(filp, "\n port stats\n");
208 	mutex_lock(&mcs->stats_lock);
209 	for_each_set_bit(lmac, &mcs->hw->lmac_bmap, mcs->hw->lmac_cnt) {
210 		mcs_get_port_stats(mcs, &stats, lmac, dir);
211 		seq_printf(filp, "port%d: Tcam Miss: %lld\n", lmac, stats.tcam_miss_cnt);
212 		seq_printf(filp, "port%d: Parser errors: %lld\n", lmac, stats.parser_err_cnt);
213 
214 		if (dir == MCS_RX && mcs->hw->mcs_blks > 1)
215 			seq_printf(filp, "port%d: Preempt error: %lld\n", lmac,
216 				   stats.preempt_err_cnt);
217 		if (dir == MCS_TX)
218 			seq_printf(filp, "port%d: Sectag insert error: %lld\n", lmac,
219 				   stats.sectag_insert_err_cnt);
220 	}
221 	mutex_unlock(&mcs->stats_lock);
222 	return 0;
223 }
224 
/* debugfs "rx_stats/port": MCS RX per-port counters */
static int rvu_dbg_mcs_rx_port_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_port_stats, mcs_rx_port_stats_display, NULL);
231 
/* debugfs "tx_stats/port": MCS TX per-port counters */
static int rvu_dbg_mcs_tx_port_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_port_stats, mcs_tx_port_stats_display, NULL);
238 
rvu_dbg_mcs_sa_stats_display(struct seq_file * filp,void * unused,int dir)239 static int rvu_dbg_mcs_sa_stats_display(struct seq_file *filp, void *unused, int dir)
240 {
241 	struct mcs *mcs = filp->private;
242 	struct mcs_sa_stats stats;
243 	struct rsrc_bmap *map;
244 	int sa_id;
245 
246 	if (dir == MCS_TX) {
247 		map = &mcs->tx.sa;
248 		mutex_lock(&mcs->stats_lock);
249 		for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
250 			seq_puts(filp, "\n TX SA stats\n");
251 			mcs_get_sa_stats(mcs, &stats, sa_id, MCS_TX);
252 			seq_printf(filp, "sa%d: Pkts encrypted: %lld\n", sa_id,
253 				   stats.pkt_encrypt_cnt);
254 
255 			seq_printf(filp, "sa%d: Pkts protected: %lld\n", sa_id,
256 				   stats.pkt_protected_cnt);
257 		}
258 		mutex_unlock(&mcs->stats_lock);
259 		return 0;
260 	}
261 
262 	/* RX stats */
263 	map = &mcs->rx.sa;
264 	mutex_lock(&mcs->stats_lock);
265 	for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
266 		seq_puts(filp, "\n RX SA stats\n");
267 		mcs_get_sa_stats(mcs, &stats, sa_id, MCS_RX);
268 		seq_printf(filp, "sa%d: Invalid pkts: %lld\n", sa_id, stats.pkt_invalid_cnt);
269 		seq_printf(filp, "sa%d: Pkts no sa error: %lld\n", sa_id, stats.pkt_nosaerror_cnt);
270 		seq_printf(filp, "sa%d: Pkts not valid: %lld\n", sa_id, stats.pkt_notvalid_cnt);
271 		seq_printf(filp, "sa%d: Pkts ok: %lld\n", sa_id, stats.pkt_ok_cnt);
272 		seq_printf(filp, "sa%d: Pkts no sa: %lld\n", sa_id, stats.pkt_nosa_cnt);
273 	}
274 	mutex_unlock(&mcs->stats_lock);
275 	return 0;
276 }
277 
/* debugfs "rx_stats/sa": MCS RX per-SA counters */
static int rvu_dbg_mcs_rx_sa_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_sa_stats, mcs_rx_sa_stats_display, NULL);
284 
/* debugfs "tx_stats/sa": MCS TX per-SA counters */
static int rvu_dbg_mcs_tx_sa_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_sa_stats, mcs_tx_sa_stats_display, NULL);
291 
rvu_dbg_mcs_tx_sc_stats_display(struct seq_file * filp,void * unused)292 static int rvu_dbg_mcs_tx_sc_stats_display(struct seq_file *filp, void *unused)
293 {
294 	struct mcs *mcs = filp->private;
295 	struct mcs_sc_stats stats;
296 	struct rsrc_bmap *map;
297 	int sc_id;
298 
299 	map = &mcs->tx.sc;
300 	seq_puts(filp, "\n SC stats\n");
301 
302 	mutex_lock(&mcs->stats_lock);
303 	for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
304 		mcs_get_sc_stats(mcs, &stats, sc_id, MCS_TX);
305 		seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
306 		seq_printf(filp, "sc%d: Pkts encrypted: %lld\n", sc_id, stats.pkt_encrypt_cnt);
307 		seq_printf(filp, "sc%d: Pkts protected: %lld\n", sc_id, stats.pkt_protected_cnt);
308 
309 		if (mcs->hw->mcs_blks == 1) {
310 			seq_printf(filp, "sc%d: Octets encrypted: %lld\n", sc_id,
311 				   stats.octet_encrypt_cnt);
312 			seq_printf(filp, "sc%d: Octets protected: %lld\n", sc_id,
313 				   stats.octet_protected_cnt);
314 		}
315 	}
316 	mutex_unlock(&mcs->stats_lock);
317 	return 0;
318 }
319 
320 RVU_DEBUG_SEQ_FOPS(mcs_tx_sc_stats, mcs_tx_sc_stats_display, NULL);
321 
/* debugfs "rx_stats/sc": per-SC (secure channel) RX MACsec counters */
static int rvu_dbg_mcs_rx_sc_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_sc_stats stats;
	struct rsrc_bmap *map;
	int sc_id;

	map = &mcs->rx.sc;
	seq_puts(filp, "\n SC stats\n");

	/* stats_lock serializes against other stats readers on this MCS */
	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
		mcs_get_sc_stats(mcs, &stats, sc_id, MCS_RX);
		seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
		seq_printf(filp, "sc%d: Cam hits: %lld\n", sc_id, stats.hit_cnt);
		seq_printf(filp, "sc%d: Invalid pkts: %lld\n", sc_id, stats.pkt_invalid_cnt);
		seq_printf(filp, "sc%d: Late pkts: %lld\n", sc_id, stats.pkt_late_cnt);
		seq_printf(filp, "sc%d: Notvalid pkts: %lld\n", sc_id, stats.pkt_notvalid_cnt);
		seq_printf(filp, "sc%d: Unchecked pkts: %lld\n", sc_id, stats.pkt_unchecked_cnt);

		/* delay/ok counters are reported only on multi-block MCS */
		if (mcs->hw->mcs_blks > 1) {
			seq_printf(filp, "sc%d: Delay pkts: %lld\n", sc_id, stats.pkt_delay_cnt);
			seq_printf(filp, "sc%d: Pkts ok: %lld\n", sc_id, stats.pkt_ok_cnt);
		}
		/* octet counters are reported only on single-block MCS */
		if (mcs->hw->mcs_blks == 1) {
			seq_printf(filp, "sc%d: Octets decrypted: %lld\n", sc_id,
				   stats.octet_decrypt_cnt);
			seq_printf(filp, "sc%d: Octets validated: %lld\n", sc_id,
				   stats.octet_validate_cnt);
		}
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_sc_stats, mcs_rx_sc_stats_display, NULL);
358 
rvu_dbg_mcs_flowid_stats_display(struct seq_file * filp,void * unused,int dir)359 static int rvu_dbg_mcs_flowid_stats_display(struct seq_file *filp, void *unused, int dir)
360 {
361 	struct mcs *mcs = filp->private;
362 	struct mcs_flowid_stats stats;
363 	struct rsrc_bmap *map;
364 	int flow_id;
365 
366 	seq_puts(filp, "\n Flowid stats\n");
367 
368 	if (dir == MCS_RX)
369 		map = &mcs->rx.flow_ids;
370 	else
371 		map = &mcs->tx.flow_ids;
372 
373 	mutex_lock(&mcs->stats_lock);
374 	for_each_set_bit(flow_id, map->bmap, mcs->hw->tcam_entries) {
375 		mcs_get_flowid_stats(mcs, &stats, flow_id, dir);
376 		seq_printf(filp, "Flowid%d: Hit:%lld\n", flow_id, stats.tcam_hit_cnt);
377 	}
378 	mutex_unlock(&mcs->stats_lock);
379 	return 0;
380 }
381 
/* debugfs "tx_stats/flowid": MCS TX TCAM hit counters */
static int rvu_dbg_mcs_tx_flowid_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_flowid_stats, mcs_tx_flowid_stats_display, NULL);
388 
/* debugfs "rx_stats/flowid": MCS RX TCAM hit counters */
static int rvu_dbg_mcs_rx_flowid_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_flowid_stats, mcs_rx_flowid_stats_display, NULL);
395 
/* debugfs "tx_stats/secy": per-SecY TX MACsec counters */
static int rvu_dbg_mcs_tx_secy_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_secy_stats stats;
	struct rsrc_bmap *map;
	int secy_id;

	map = &mcs->tx.secy;
	seq_puts(filp, "\n MCS TX secy stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
		mcs_get_tx_secy_stats(mcs, &stats, secy_id);
		seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
		seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
		seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
			   stats.unctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
		seq_printf(filp, "secy%d: Octet encrypted: %lld\n", secy_id,
			   stats.octet_encrypted_cnt);
		seq_printf(filp, "secy%d: octet protected: %lld\n", secy_id,
			   stats.octet_protected_cnt);
		/* NOTE(review): label says "Pkts on active sa" but the counter
		 * is pkt_noactivesa_cnt — probably means "no active SA"; confirm
		 * before changing the user-visible string.
		 */
		seq_printf(filp, "secy%d: Pkts on active sa: %lld\n", secy_id,
			   stats.pkt_noactivesa_cnt);
		seq_printf(filp, "secy%d: Pkts too long: %lld\n", secy_id, stats.pkt_toolong_cnt);
		seq_printf(filp, "secy%d: Pkts untagged: %lld\n", secy_id, stats.pkt_untagged_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_secy_stats, mcs_tx_secy_stats_display, NULL);
438 
/* debugfs "rx_stats/secy": per-SecY RX MACsec counters */
static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_secy_stats stats;
	struct rsrc_bmap *map;
	int secy_id;

	map = &mcs->rx.secy;
	seq_puts(filp, "\n MCS secy stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
		mcs_get_rx_secy_stats(mcs, &stats, secy_id);
		seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
		seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
		seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
			   stats.unctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
		seq_printf(filp, "secy%d: Octet decrypted: %lld\n", secy_id,
			   stats.octet_decrypted_cnt);
		seq_printf(filp, "secy%d: octet validated: %lld\n", secy_id,
			   stats.octet_validated_cnt);
		seq_printf(filp, "secy%d: Pkts on disable port: %lld\n", secy_id,
			   stats.pkt_port_disabled_cnt);
		seq_printf(filp, "secy%d: Pkts with badtag: %lld\n", secy_id, stats.pkt_badtag_cnt);
		seq_printf(filp, "secy%d: Pkts with no SA(sectag.tci.c=0): %lld\n", secy_id,
			   stats.pkt_nosa_cnt);
		seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
			   stats.pkt_nosaerror_cnt);
		seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,
			   stats.pkt_tagged_ctl_cnt);
		seq_printf(filp, "secy%d: Untaged pkts: %lld\n", secy_id, stats.pkt_untaged_cnt);
		seq_printf(filp, "secy%d: Ctrl pkts: %lld\n", secy_id, stats.pkt_ctl_cnt);
		/* notag counter is reported only on multi-block MCS */
		if (mcs->hw->mcs_blks > 1)
			seq_printf(filp, "secy%d: pkts notag: %lld\n", secy_id,
				   stats.pkt_notag_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_secy_stats, mcs_rx_secy_stats_display, NULL);
491 
rvu_dbg_mcs_init(struct rvu * rvu)492 static void rvu_dbg_mcs_init(struct rvu *rvu)
493 {
494 	struct mcs *mcs;
495 	char dname[10];
496 	int i;
497 
498 	if (!rvu->mcs_blk_cnt)
499 		return;
500 
501 	rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root);
502 
503 	for (i = 0; i < rvu->mcs_blk_cnt; i++) {
504 		mcs = mcs_get_pdata(i);
505 
506 		sprintf(dname, "mcs%d", i);
507 		rvu->rvu_dbg.mcs = debugfs_create_dir(dname,
508 						      rvu->rvu_dbg.mcs_root);
509 
510 		rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs);
511 
512 		debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs,
513 				    &rvu_dbg_mcs_rx_flowid_stats_fops);
514 
515 		debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs,
516 				    &rvu_dbg_mcs_rx_secy_stats_fops);
517 
518 		debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_rx, mcs,
519 				    &rvu_dbg_mcs_rx_sc_stats_fops);
520 
521 		debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_rx, mcs,
522 				    &rvu_dbg_mcs_rx_sa_stats_fops);
523 
524 		debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_rx, mcs,
525 				    &rvu_dbg_mcs_rx_port_stats_fops);
526 
527 		rvu->rvu_dbg.mcs_tx = debugfs_create_dir("tx_stats", rvu->rvu_dbg.mcs);
528 
529 		debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_tx, mcs,
530 				    &rvu_dbg_mcs_tx_flowid_stats_fops);
531 
532 		debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_tx, mcs,
533 				    &rvu_dbg_mcs_tx_secy_stats_fops);
534 
535 		debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_tx, mcs,
536 				    &rvu_dbg_mcs_tx_sc_stats_fops);
537 
538 		debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_tx, mcs,
539 				    &rvu_dbg_mcs_tx_sa_stats_fops);
540 
541 		debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_tx, mcs,
542 				    &rvu_dbg_mcs_tx_port_stats_fops);
543 	}
544 }
545 
546 #define LMT_MAPTBL_ENTRY_SIZE 16
547 /* Dump LMTST map table */
rvu_dbg_lmtst_map_table_display(struct file * filp,char __user * buffer,size_t count,loff_t * ppos)548 static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
549 					       char __user *buffer,
550 					       size_t count, loff_t *ppos)
551 {
552 	struct rvu *rvu = filp->private_data;
553 	u64 lmt_addr, val, tbl_base;
554 	int pf, vf, num_vfs, hw_vfs;
555 	void __iomem *lmt_map_base;
556 	int buf_size = 10240;
557 	size_t off = 0;
558 	int index = 0;
559 	char *buf;
560 	int ret;
561 
562 	/* don't allow partial reads */
563 	if (*ppos != 0)
564 		return 0;
565 
566 	buf = kzalloc(buf_size, GFP_KERNEL);
567 	if (!buf)
568 		return -ENOMEM;
569 
570 	tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
571 
572 	lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
573 	if (!lmt_map_base) {
574 		dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
575 		kfree(buf);
576 		return false;
577 	}
578 
579 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
580 			  "\n\t\t\t\t\tLmtst Map Table Entries");
581 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
582 			  "\n\t\t\t\t\t=======================");
583 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
584 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
585 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
586 			  "Lmtline Base (word 0)\t\t");
587 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
588 			  "Lmt Map Entry (word 1)");
589 	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
590 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
591 		off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d  \t\t\t",
592 				    pf);
593 
594 		index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
595 		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
596 				 (tbl_base + index));
597 		lmt_addr = readq(lmt_map_base + index);
598 		off += scnprintf(&buf[off], buf_size - 1 - off,
599 				 " 0x%016llx\t\t", lmt_addr);
600 		index += 8;
601 		val = readq(lmt_map_base + index);
602 		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
603 				 val);
604 		/* Reading num of VFs per PF */
605 		rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
606 		for (vf = 0; vf < num_vfs; vf++) {
607 			index = (pf * rvu->hw->total_vfs * 16) +
608 				((vf + 1)  * LMT_MAPTBL_ENTRY_SIZE);
609 			off += scnprintf(&buf[off], buf_size - 1 - off,
610 					    "PF%d:VF%d  \t\t", pf, vf);
611 			off += scnprintf(&buf[off], buf_size - 1 - off,
612 					 " 0x%llx\t\t", (tbl_base + index));
613 			lmt_addr = readq(lmt_map_base + index);
614 			off += scnprintf(&buf[off], buf_size - 1 - off,
615 					 " 0x%016llx\t\t", lmt_addr);
616 			index += 8;
617 			val = readq(lmt_map_base + index);
618 			off += scnprintf(&buf[off], buf_size - 1 - off,
619 					 " 0x%016llx\n", val);
620 		}
621 	}
622 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "\n");
623 
624 	ret = min(off, count);
625 	if (copy_to_user(buffer, buf, ret))
626 		ret = -EFAULT;
627 	kfree(buf);
628 
629 	iounmap(lmt_map_base);
630 	if (ret < 0)
631 		return ret;
632 
633 	*ppos = ret;
634 	return ret;
635 }
636 
637 RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
638 
/* Render the list of @block's LFs attached to @pcifunc into @lfs as a
 * comma-separated list with consecutive runs collapsed into ranges,
 * e.g. "0-3,7,9-10". @lfs must be large enough for the longest possible
 * list (callers size it via get_max_column_width()).
 */
static void get_lf_str_list(const struct rvu_block *block, int pcifunc,
			    char *lfs)
{
	/* prev_lf starts at lf.max (an impossible LF) so the first match can
	 * never look like a run continuation; seq != 0 means an "a-b" run is
	 * open and its end has not been printed yet.
	 */
	int lf = 0, seq = 0, len = 0, prev_lf = block->lf.max;

	for_each_set_bit(lf, block->lf.bmap, block->lf.max) {
		/* defensive: for_each_set_bit already bounds lf to lf.max */
		if (lf >= block->lf.max)
			break;

		if (block->fn_map[lf] != pcifunc)
			continue;

		if (lf == prev_lf + 1) {
			/* extend the open run; print nothing yet */
			prev_lf = lf;
			seq = 1;
			continue;
		}

		if (seq)
			/* close the previous run, then start a new entry */
			len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
		else
			len += (len ? sprintf(lfs + len, ",%d", lf) :
				      sprintf(lfs + len, "%d", lf));

		prev_lf = lf;
		seq = 0;
	}

	/* flush a run that was still open when the bitmap ended */
	if (seq)
		len += sprintf(lfs + len, "-%d", prev_lf);

	lfs[len] = '\0';
}
672 
get_max_column_width(struct rvu * rvu)673 static int get_max_column_width(struct rvu *rvu)
674 {
675 	int index, pf, vf, lf_str_size = 12, buf_size = 256;
676 	struct rvu_block block;
677 	u16 pcifunc;
678 	char *buf;
679 
680 	buf = kzalloc(buf_size, GFP_KERNEL);
681 	if (!buf)
682 		return -ENOMEM;
683 
684 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
685 		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
686 			pcifunc = pf << 10 | vf;
687 			if (!pcifunc)
688 				continue;
689 
690 			for (index = 0; index < BLK_COUNT; index++) {
691 				block = rvu->hw->block[index];
692 				if (!strlen(block.name))
693 					continue;
694 
695 				get_lf_str_list(&block, pcifunc, buf);
696 				if (lf_str_size <= strlen(buf))
697 					lf_str_size = strlen(buf) + 1;
698 			}
699 		}
700 	}
701 
702 	kfree(buf);
703 	return lf_str_size;
704 }
705 
/* Dumps current provisioning status of all RVU block LFs */
static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
					  char __user *buffer,
					  size_t count, loff_t *ppos)
{
	int index, off = 0, flag = 0, len = 0, i = 0;
	struct rvu *rvu = filp->private_data;
	int bytes_not_copied = 0;
	struct rvu_block block;
	int pf, vf, pcifunc;
	int buf_size = 2048;
	int lf_str_size;
	char *lfs;
	char *buf;

	/* don't allow partial reads */
	if (*ppos != 0)
		return 0;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Get the maximum width of a column */
	lf_str_size = get_max_column_width(rvu);

	/* NOTE(review): get_max_column_width() can return -ENOMEM, which
	 * would make this kzalloc fail and bail out below — works, but the
	 * error is reported as -ENOMEM rather than checked explicitly.
	 */
	lfs = kzalloc(lf_str_size, GFP_KERNEL);
	if (!lfs) {
		kfree(buf);
		return -ENOMEM;
	}
	/* Header row: "pcifunc" plus one fixed-width column per named block */
	off +=	scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
			  "pcifunc");
	for (index = 0; index < BLK_COUNT; index++)
		if (strlen(rvu->hw->block[index].name)) {
			off += scnprintf(&buf[off], buf_size - 1 - off,
					 "%-*s", lf_str_size,
					 rvu->hw->block[index].name);
		}

	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
	/* NOTE(review): rows are copied to userspace at buffer + i*off, which
	 * assumes every previously-copied chunk had the same length as the
	 * current one; *ppos tracks the true running total. Verify against a
	 * reader before relying on exact output offsets.
	 */
	bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
	if (bytes_not_copied)
		goto out;

	i++;
	*ppos += off;
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		/* vf == 0 is the PF itself; VF n is encoded as vf = n + 1 */
		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
			off = 0;
			flag = 0;
			pcifunc = pf << 10 | vf;
			if (!pcifunc)
				continue;

			if (vf) {
				sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
				off = scnprintf(&buf[off],
						buf_size - 1 - off,
						"%-*s", lf_str_size, lfs);
			} else {
				sprintf(lfs, "PF%d", pf);
				off = scnprintf(&buf[off],
						buf_size - 1 - off,
						"%-*s", lf_str_size, lfs);
			}

			/* One column per block: list of LFs owned by pcifunc */
			for (index = 0; index < BLK_COUNT; index++) {
				block = rvu->hw->block[index];
				if (!strlen(block.name))
					continue;
				len = 0;
				lfs[len] = '\0';
				get_lf_str_list(&block, pcifunc, lfs);
				if (strlen(lfs))
					flag = 1;

				off += scnprintf(&buf[off], buf_size - 1 - off,
						 "%-*s", lf_str_size, lfs);
			}
			/* Only emit the row if this pcifunc owns any LF */
			if (flag) {
				off +=	scnprintf(&buf[off],
						  buf_size - 1 - off, "\n");
				bytes_not_copied = copy_to_user(buffer +
								(i * off),
								buf, off);
				if (bytes_not_copied)
					goto out;

				i++;
				*ppos += off;
			}
		}
	}

out:
	kfree(lfs);
	kfree(buf);
	if (bytes_not_copied)
		return -EFAULT;

	return *ppos;
}

RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
811 
/* debugfs "rvu_pf_cgx_map": table of PF -> NIX block -> CGX/RPM LMAC/channel */
static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
{
	char cgx[10], lmac[10], chan[10];
	struct rvu *rvu = filp->private;
	struct pci_dev *pdev = NULL;
	struct mac_ops *mac_ops;
	struct rvu_pfvf *pfvf;
	int pf, domain, blkid;
	u8 cgx_id, lmac_id;
	u16 pcifunc;

	/* NOTE(review): PCI domain hard-coded to 2 and PF device looked up at
	 * bus pf + 1 — presumably fixed by this silicon's enumeration; confirm.
	 */
	domain = 2;
	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
	/* There can be no CGX devices at all */
	if (!mac_ops)
		return 0;
	seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\tCHAN\n",
		   mac_ops->name);
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (!is_pf_cgxmapped(rvu, pf))
			continue;

		pdev =  pci_get_domain_bus_and_slot(domain, pf + 1, 0);
		if (!pdev)
			continue;

		cgx[0] = 0;
		lmac[0] = 0;
		pcifunc = pf << 10;	/* PF pcifunc has vf bits zero */
		pfvf = rvu_get_pfvf(rvu, pcifunc);

		if (pfvf->nix_blkaddr == BLKADDR_NIX0)
			blkid = 0;
		else
			blkid = 1;

		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
				    &lmac_id);
		sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
		sprintf(lmac, "LMAC%d", lmac_id);
		sprintf(chan, "%d",
			rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0));
		seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\t%s\n",
			   dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac,
			   chan);

		pci_dev_put(pdev);	/* drop ref taken by the lookup above */
	}
	return 0;
}

RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
864 
rvu_dbg_is_valid_lf(struct rvu * rvu,int blkaddr,int lf,u16 * pcifunc)865 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
866 				u16 *pcifunc)
867 {
868 	struct rvu_block *block;
869 	struct rvu_hwinfo *hw;
870 
871 	hw = rvu->hw;
872 	block = &hw->block[blkaddr];
873 
874 	if (lf < 0 || lf >= block->lf.max) {
875 		dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
876 			 block->lf.max - 1);
877 		return false;
878 	}
879 
880 	*pcifunc = block->fn_map[lf];
881 	if (!*pcifunc) {
882 		dev_warn(rvu->dev,
883 			 "This LF is not attached to any RVU PFFUNC\n");
884 		return false;
885 	}
886 	return true;
887 }
888 
print_npa_qsize(struct seq_file * m,struct rvu_pfvf * pfvf)889 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
890 {
891 	char *buf;
892 
893 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
894 	if (!buf)
895 		return;
896 
897 	if (!pfvf->aura_ctx) {
898 		seq_puts(m, "Aura context is not initialized\n");
899 	} else {
900 		bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
901 					pfvf->aura_ctx->qsize);
902 		seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
903 		seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
904 	}
905 
906 	if (!pfvf->pool_ctx) {
907 		seq_puts(m, "Pool context is not initialized\n");
908 	} else {
909 		bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
910 					pfvf->pool_ctx->qsize);
911 		seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
912 		seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
913 	}
914 	kfree(buf);
915 }
916 
917 /* The 'qsize' entry dumps current Aura/Pool context Qsize
918  * and each context's current enable/disable status in a bitmap.
919  */
rvu_dbg_qsize_display(struct seq_file * s,void * unsused,int blktype)920 static int rvu_dbg_qsize_display(struct seq_file *s, void *unsused,
921 				 int blktype)
922 {
923 	void (*print_qsize)(struct seq_file *s,
924 			    struct rvu_pfvf *pfvf) = NULL;
925 	struct rvu_pfvf *pfvf;
926 	struct rvu *rvu;
927 	int qsize_id;
928 	u16 pcifunc;
929 	int blkaddr;
930 
931 	rvu = s->private;
932 	switch (blktype) {
933 	case BLKTYPE_NPA:
934 		qsize_id = rvu->rvu_dbg.npa_qsize_id;
935 		print_qsize = print_npa_qsize;
936 		break;
937 
938 	case BLKTYPE_NIX:
939 		qsize_id = rvu->rvu_dbg.nix_qsize_id;
940 		print_qsize = print_nix_qsize;
941 		break;
942 
943 	default:
944 		return -EINVAL;
945 	}
946 
947 	if (blktype == BLKTYPE_NPA)
948 		blkaddr = BLKADDR_NPA;
949 	else
950 		blkaddr = debugfs_get_aux_num(s->file);
951 
952 	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
953 		return -EINVAL;
954 
955 	pfvf = rvu_get_pfvf(rvu, pcifunc);
956 	print_qsize(s, pfvf);
957 
958 	return 0;
959 }
960 
rvu_dbg_qsize_write(struct file * file,const char __user * buffer,size_t count,loff_t * ppos,int blktype)961 static ssize_t rvu_dbg_qsize_write(struct file *file,
962 				   const char __user *buffer, size_t count,
963 				   loff_t *ppos, int blktype)
964 {
965 	char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
966 	struct seq_file *seqfile = file->private_data;
967 	char *cmd_buf, *cmd_buf_tmp, *subtoken;
968 	struct rvu *rvu = seqfile->private;
969 	int blkaddr;
970 	u16 pcifunc;
971 	int ret, lf;
972 
973 	cmd_buf = memdup_user_nul(buffer, count);
974 	if (IS_ERR(cmd_buf))
975 		return -ENOMEM;
976 
977 	cmd_buf_tmp = strchr(cmd_buf, '\n');
978 	if (cmd_buf_tmp) {
979 		*cmd_buf_tmp = '\0';
980 		count = cmd_buf_tmp - cmd_buf + 1;
981 	}
982 
983 	cmd_buf_tmp = cmd_buf;
984 	subtoken = strsep(&cmd_buf, " ");
985 	ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
986 	if (cmd_buf)
987 		ret = -EINVAL;
988 
989 	if (ret < 0 || !strncmp(subtoken, "help", 4)) {
990 		dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
991 		goto qsize_write_done;
992 	}
993 
994 	if (blktype == BLKTYPE_NPA)
995 		blkaddr = BLKADDR_NPA;
996 	else
997 		blkaddr = debugfs_get_aux_num(file);
998 
999 	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
1000 		ret = -EINVAL;
1001 		goto qsize_write_done;
1002 	}
1003 	if (blktype  == BLKTYPE_NPA)
1004 		rvu->rvu_dbg.npa_qsize_id = lf;
1005 	else
1006 		rvu->rvu_dbg.nix_qsize_id = lf;
1007 
1008 qsize_write_done:
1009 	kfree(cmd_buf_tmp);
1010 	return ret ? ret : count;
1011 }
1012 
rvu_dbg_npa_qsize_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos)1013 static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
1014 				       const char __user *buffer,
1015 				       size_t count, loff_t *ppos)
1016 {
1017 	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
1018 					    BLKTYPE_NPA);
1019 }
1020 
/* debugfs read handler: dump aura/pool qsize info of the selected NPA LF */
static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
}

RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
1027 
/* Dumps given NPA Aura's context.
 * Each "Wn:" prefix names the 64-bit word of the hardware aura context
 * structure (npa_aura_s) that the field lives in.
 */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_aura_s *aura = &rsp->aura;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);

	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);

	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);

	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);

	/* fc_be is only printed on non-OcteonTx2 (newer) silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);

	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);

	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);

	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
	/* fc_msh_dst is only printed on non-OcteonTx2 (newer) silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
}
1075 
/* Dumps given NPA Pool's context.
 * Each "Wn:" prefix names the 64-bit word of the hardware pool context
 * structure (npa_pool_s) that the field lives in.
 */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_pool_s *pool = &rsp->pool;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);

	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);

	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);

	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);

	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	/* fc_be is only printed on non-OcteonTx2 (newer) silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);

	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);

	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);

	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);

	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
	/* fc_msh_dst is only printed on non-OcteonTx2 (newer) silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
}
1122 
/* Reads aura/pool's ctx from admin queue.
 *
 * The LF/id/all selection was previously stored by the matching debugfs
 * write handler (see write_npa_ctx()). For each selected context an AQ
 * read instruction is issued and the response is pretty-printed.
 */
static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
{
	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
	struct npa_aq_enq_req aq_req;
	struct npa_aq_enq_rsp rsp;
	struct rvu_pfvf *pfvf;
	int aura, rc, max_id;
	int npalf, id, all;
	struct rvu *rvu;
	u16 pcifunc;

	rvu = m->private;

	/* Fetch the selection remembered by the last debugfs write */
	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
		id = rvu->rvu_dbg.npa_aura_ctx.id;
		all = rvu->rvu_dbg.npa_aura_ctx.all;
		break;

	case NPA_AQ_CTYPE_POOL:
		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
		id = rvu->rvu_dbg.npa_pool_ctx.id;
		all = rvu->rvu_dbg.npa_pool_ctx.all;
		break;
	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
		return -EINVAL;
	}

	/* Build a reusable AQ READ request; only aura_id varies per loop */
	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NPA_AQ_INSTOP_READ;
	if (ctype == NPA_AQ_CTYPE_AURA) {
		max_id = pfvf->aura_ctx->qsize;
		print_npa_ctx = print_npa_aura_ctx;
	} else {
		max_id = pfvf->pool_ctx->qsize;
		print_npa_ctx = print_npa_pool_ctx;
	}

	if (id < 0 || id >= max_id) {
		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			max_id - 1);
		return -EINVAL;
	}

	/* "all" dumps every context; otherwise dump just the chosen id */
	if (all)
		id = 0;
	else
		max_id = id + 1;

	for (aura = id; aura < max_id; aura++) {
		aq_req.aura_id = aura;

		/* Skip if queue is uninitialized */
		if (ctype == NPA_AQ_CTYPE_POOL && !test_bit(aura, pfvf->pool_bmap))
			continue;

		seq_printf(m, "======%s : %d=======\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
			aq_req.aura_id);
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(m, "Failed to read context\n");
			return -EINVAL;
		}
		print_npa_ctx(m, &rsp);
	}
	return 0;
}
1208 
/* Validate and store the aura/pool context selection made through the
 * debugfs write handlers, for use by the next read of the ctx file.
 */
static int write_npa_ctx(struct rvu *rvu, bool all,
			 int npalf, int id, int ctype)
{
	struct rvu_pfvf *pfvf;
	int max_id = 0;
	u16 pcifunc;

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* No default case: an unknown ctype leaves max_id at 0 so the range
	 * check below rejects it (same behavior as before).
	 */
	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		if (!pfvf->aura_ctx) {
			dev_warn(rvu->dev, "Aura context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->aura_ctx->qsize;
		break;
	case NPA_AQ_CTYPE_POOL:
		if (!pfvf->pool_ctx) {
			dev_warn(rvu->dev, "Pool context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->pool_ctx->qsize;
		break;
	}

	if (id < 0 || id >= max_id) {
		dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
			 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			max_id - 1);
		return -EINVAL;
	}

	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
		rvu->rvu_dbg.npa_aura_ctx.id = id;
		rvu->rvu_dbg.npa_aura_ctx.all = all;
		break;

	case NPA_AQ_CTYPE_POOL:
		rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
		rvu->rvu_dbg.npa_pool_ctx.id = id;
		rvu->rvu_dbg.npa_pool_ctx.all = all;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
1259 
/* Parse "<npalf> <id>|all" from a debugfs write into @npalf/@id/@all.
 *
 * NOTE: strsep() advances the local @cmd_buf pointer; the caller retains
 * (and frees) its own copy of the original buffer. A non-NULL @cmd_buf
 * after tokenizing means extra tokens were supplied — a usage error.
 */
static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
				const char __user *buffer, int *npalf,
				int *id, bool *all)
{
	int bytes_not_copied;
	char *cmd_buf_tmp;
	char *subtoken;
	int ret;

	bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
	if (bytes_not_copied)
		return -EFAULT;

	/* Caller allocated *count + 1 bytes, so this store is in bounds */
	cmd_buf[*count] = '\0';
	cmd_buf_tmp = strchr(cmd_buf, '\n');

	if (cmd_buf_tmp) {
		/* Strip the trailing newline; claim only the parsed bytes */
		*cmd_buf_tmp = '\0';
		*count = cmd_buf_tmp - cmd_buf + 1;
	}

	/* First token: NPA LF number */
	subtoken = strsep(&cmd_buf, " ");
	ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
	if (ret < 0)
		return ret;
	/* Second token: a numeric aura/pool id or the literal "all" */
	subtoken = strsep(&cmd_buf, " ");
	if (subtoken && strcmp(subtoken, "all") == 0) {
		*all = true;
	} else {
		ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
		if (ret < 0)
			return ret;
	}
	if (cmd_buf)
		return -EINVAL;
	return ret;
}
1297 
rvu_dbg_npa_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos,int ctype)1298 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
1299 				     const char __user *buffer,
1300 				     size_t count, loff_t *ppos, int ctype)
1301 {
1302 	char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
1303 					"aura" : "pool";
1304 	struct seq_file *seqfp = filp->private_data;
1305 	struct rvu *rvu = seqfp->private;
1306 	int npalf, id = 0, ret;
1307 	bool all = false;
1308 
1309 	if ((*ppos != 0) || !count)
1310 		return -EINVAL;
1311 
1312 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1313 	if (!cmd_buf)
1314 		return count;
1315 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1316 				   &npalf, &id, &all);
1317 	if (ret < 0) {
1318 		dev_info(rvu->dev,
1319 			 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
1320 			 ctype_string, ctype_string);
1321 		goto done;
1322 	} else {
1323 		ret = write_npa_ctx(rvu, all, npalf, id, ctype);
1324 	}
1325 done:
1326 	kfree(cmd_buf);
1327 	return ret ? ret : count;
1328 }
1329 
rvu_dbg_npa_aura_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos)1330 static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
1331 					  const char __user *buffer,
1332 					  size_t count, loff_t *ppos)
1333 {
1334 	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1335 				     NPA_AQ_CTYPE_AURA);
1336 }
1337 
/* debugfs read handler: dump the selected aura context(s) */
static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
}

RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
1344 
rvu_dbg_npa_pool_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos)1345 static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
1346 					  const char __user *buffer,
1347 					  size_t count, loff_t *ppos)
1348 {
1349 	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1350 				     NPA_AQ_CTYPE_POOL);
1351 }
1352 
/* debugfs read handler: dump the selected pool context(s) */
static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
}

RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
1359 
ndc_cache_stats(struct seq_file * s,int blk_addr,int ctype,int transaction)1360 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
1361 			    int ctype, int transaction)
1362 {
1363 	u64 req, out_req, lat, cant_alloc;
1364 	struct nix_hw *nix_hw;
1365 	struct rvu *rvu;
1366 	int port;
1367 
1368 	if (blk_addr == BLKADDR_NDC_NPA0) {
1369 		rvu = s->private;
1370 	} else {
1371 		nix_hw = s->private;
1372 		rvu = nix_hw->rvu;
1373 	}
1374 
1375 	for (port = 0; port < NDC_MAX_PORT; port++) {
1376 		req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
1377 						(port, ctype, transaction));
1378 		lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
1379 						(port, ctype, transaction));
1380 		out_req = rvu_read64(rvu, blk_addr,
1381 				     NDC_AF_PORTX_RTX_RWX_OSTDN_PC
1382 				     (port, ctype, transaction));
1383 		cant_alloc = rvu_read64(rvu, blk_addr,
1384 					NDC_AF_PORTX_RTX_CANT_ALLOC_PC
1385 					(port, transaction));
1386 		seq_printf(s, "\nPort:%d\n", port);
1387 		seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
1388 		seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
1389 		seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
1390 		seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
1391 		seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
1392 	}
1393 }
1394 
ndc_blk_cache_stats(struct seq_file * s,int idx,int blk_addr)1395 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
1396 {
1397 	seq_puts(s, "\n***** CACHE mode read stats *****\n");
1398 	ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
1399 	seq_puts(s, "\n***** CACHE mode write stats *****\n");
1400 	ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
1401 	seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
1402 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
1403 	seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
1404 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
1405 	return 0;
1406 }
1407 
/* debugfs read handler: dump NPA NDC cache/bypass statistics */
static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
{
	return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
1414 
ndc_blk_hits_miss_stats(struct seq_file * s,int idx,int blk_addr)1415 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
1416 {
1417 	struct nix_hw *nix_hw;
1418 	struct rvu *rvu;
1419 	int bank, max_bank;
1420 	u64 ndc_af_const;
1421 
1422 	if (blk_addr == BLKADDR_NDC_NPA0) {
1423 		rvu = s->private;
1424 	} else {
1425 		nix_hw = s->private;
1426 		rvu = nix_hw->rvu;
1427 	}
1428 
1429 	ndc_af_const = rvu_read64(rvu, blk_addr, NDC_AF_CONST);
1430 	max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
1431 	for (bank = 0; bank < max_bank; bank++) {
1432 		seq_printf(s, "BANK:%d\n", bank);
1433 		seq_printf(s, "\tHits:\t%lld\n",
1434 			   (u64)rvu_read64(rvu, blk_addr,
1435 			   NDC_AF_BANKX_HIT_PC(bank)));
1436 		seq_printf(s, "\tMiss:\t%lld\n",
1437 			   (u64)rvu_read64(rvu, blk_addr,
1438 			    NDC_AF_BANKX_MISS_PC(bank)));
1439 	}
1440 	return 0;
1441 }
1442 
rvu_dbg_nix_ndc_rx_cache_display(struct seq_file * filp,void * unused)1443 static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
1444 {
1445 	struct nix_hw *nix_hw = filp->private;
1446 	int blkaddr = 0;
1447 	int ndc_idx = 0;
1448 
1449 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1450 		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1451 	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
1452 
1453 	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1454 }
1455 
1456 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
1457 
rvu_dbg_nix_ndc_tx_cache_display(struct seq_file * filp,void * unused)1458 static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
1459 {
1460 	struct nix_hw *nix_hw = filp->private;
1461 	int blkaddr = 0;
1462 	int ndc_idx = 0;
1463 
1464 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1465 		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1466 	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
1467 
1468 	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1469 }
1470 
1471 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
1472 
/* debugfs read handler: dump NPA NDC per-bank hit/miss counters */
static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
					     void *unused)
{
	return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
1480 
rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file * filp,void * unused)1481 static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
1482 						void *unused)
1483 {
1484 	struct nix_hw *nix_hw = filp->private;
1485 	int ndc_idx = NPA0_U;
1486 	int blkaddr = 0;
1487 
1488 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1489 		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1490 
1491 	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1492 }
1493 
1494 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
1495 
rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file * filp,void * unused)1496 static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
1497 						void *unused)
1498 {
1499 	struct nix_hw *nix_hw = filp->private;
1500 	int ndc_idx = NPA0_U;
1501 	int blkaddr = 0;
1502 
1503 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1504 		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1505 
1506 	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1507 }
1508 
1509 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
1510 
print_nix_cn10k_sq_ctx(struct seq_file * m,struct nix_cn10k_sq_ctx_s * sq_ctx)1511 static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1512 				   struct nix_cn10k_sq_ctx_s *sq_ctx)
1513 {
1514 	seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1515 		   sq_ctx->ena, sq_ctx->qint_idx);
1516 	seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1517 		   sq_ctx->substream, sq_ctx->sdp_mcast);
1518 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1519 		   sq_ctx->cq, sq_ctx->sqe_way_mask);
1520 
1521 	seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1522 		   sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1523 	seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1524 		   sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1525 	seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1526 		   sq_ctx->default_chan, sq_ctx->sqb_count);
1527 
1528 	seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1529 	seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1530 	seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1531 		   sq_ctx->sqb_aura, sq_ctx->sq_int);
1532 	seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1533 		   sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1534 
1535 	seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1536 		   sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1537 	seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1538 		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1539 	seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1540 		   sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1541 	seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1542 		   sq_ctx->tail_offset, sq_ctx->smenq_offset);
1543 	seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1544 		   sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1545 
1546 	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
1547 		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
1548 	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1549 	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1550 	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1551 	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1552 		   sq_ctx->smenq_next_sqb);
1553 
1554 	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1555 
1556 	seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1557 	seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1558 		   sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1559 	seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1560 		   sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1561 	seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1562 		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1563 
1564 	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1565 		   (u64)sq_ctx->scm_lso_rem);
1566 	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1567 	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1568 	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1569 		   (u64)sq_ctx->dropped_octs);
1570 	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1571 		   (u64)sq_ctx->dropped_pkts);
1572 }
1573 
/* Print the transmit scheduler path of one SQ by walking the PARENT
 * registers up the hierarchy: SQ -> SMQ/MDQ -> TL4 -> TL3 -> TL2 -> TL1.
 */
static void print_tm_tree(struct seq_file *m,
			  struct nix_aq_enq_rsp *rsp, u64 sq)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;
	u16 p1, p2, p3, p4, schq;
	int blkaddr;
	u64 cfg;

	blkaddr = nix_hw->blkaddr;
	/* The SQ context holds the SMQ (MDQ level queue) it feeds */
	schq = sq_ctx->smq;

	/* Each read resolves one level's parent queue index */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PARENT(schq));
	p1 = FIELD_GET(NIX_AF_MDQ_PARENT_MASK, cfg);

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PARENT(p1));
	p2 = FIELD_GET(NIX_AF_TL4_PARENT_MASK, cfg);

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PARENT(p2));
	p3 = FIELD_GET(NIX_AF_TL3_PARENT_MASK, cfg);

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PARENT(p3));
	p4 = FIELD_GET(NIX_AF_TL2_PARENT_MASK, cfg);
	seq_printf(m,
		   "SQ(%llu) -> SMQ(%u) -> TL4(%u) -> TL3(%u) -> TL2(%u) -> TL1(%u)\n",
		   sq, schq, p1, p2, p3, p4);
}
1602 
1603 /*dumps given tm_tree registers*/
rvu_dbg_nix_tm_tree_display(struct seq_file * m,void * unused)1604 static int rvu_dbg_nix_tm_tree_display(struct seq_file *m, void *unused)
1605 {
1606 	int qidx, nixlf, rc, id, max_id = 0;
1607 	struct nix_hw *nix_hw = m->private;
1608 	struct rvu *rvu = nix_hw->rvu;
1609 	struct nix_aq_enq_req aq_req;
1610 	struct nix_aq_enq_rsp rsp;
1611 	struct rvu_pfvf *pfvf;
1612 	u16 pcifunc;
1613 
1614 	nixlf = rvu->rvu_dbg.nix_tm_ctx.lf;
1615 	id = rvu->rvu_dbg.nix_tm_ctx.id;
1616 
1617 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1618 		return -EINVAL;
1619 
1620 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1621 	max_id = pfvf->sq_ctx->qsize;
1622 
1623 	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1624 	aq_req.hdr.pcifunc = pcifunc;
1625 	aq_req.ctype = NIX_AQ_CTYPE_SQ;
1626 	aq_req.op = NIX_AQ_INSTOP_READ;
1627 	seq_printf(m, "pcifunc is 0x%x\n", pcifunc);
1628 	for (qidx = id; qidx < max_id; qidx++) {
1629 		aq_req.qidx = qidx;
1630 
1631 		/* Skip SQ's if not initialized */
1632 		if (!test_bit(qidx, pfvf->sq_bmap))
1633 			continue;
1634 
1635 		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
1636 
1637 		if (rc) {
1638 			seq_printf(m, "Failed to read SQ(%d) context\n",
1639 				   aq_req.qidx);
1640 			continue;
1641 		}
1642 		print_tm_tree(m, &rsp, aq_req.qidx);
1643 	}
1644 	return 0;
1645 }
1646 
rvu_dbg_nix_tm_tree_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos)1647 static ssize_t rvu_dbg_nix_tm_tree_write(struct file *filp,
1648 					 const char __user *buffer,
1649 					 size_t count, loff_t *ppos)
1650 {
1651 	struct seq_file *m = filp->private_data;
1652 	struct nix_hw *nix_hw = m->private;
1653 	struct rvu *rvu = nix_hw->rvu;
1654 	struct rvu_pfvf *pfvf;
1655 	u16 pcifunc;
1656 	u64 nixlf;
1657 	int ret;
1658 
1659 	ret = kstrtoull_from_user(buffer, count, 10, &nixlf);
1660 	if (ret)
1661 		return ret;
1662 
1663 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1664 		return -EINVAL;
1665 
1666 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1667 	if (!pfvf->sq_ctx) {
1668 		dev_warn(rvu->dev, "SQ context is not initialized\n");
1669 		return -EINVAL;
1670 	}
1671 
1672 	rvu->rvu_dbg.nix_tm_ctx.lf = nixlf;
1673 	return count;
1674 }
1675 
1676 RVU_DEBUG_SEQ_FOPS(nix_tm_tree, nix_tm_tree_display, nix_tm_tree_write);
1677 
print_tm_topo(struct seq_file * m,u64 schq,u32 lvl)1678 static void print_tm_topo(struct seq_file *m, u64 schq, u32 lvl)
1679 {
1680 	struct nix_hw *nix_hw = m->private;
1681 	struct rvu *rvu = nix_hw->rvu;
1682 	int blkaddr, link, link_level;
1683 	struct rvu_hwinfo *hw;
1684 
1685 	hw = rvu->hw;
1686 	blkaddr = nix_hw->blkaddr;
1687 	if (lvl == NIX_TXSCH_LVL_MDQ) {
1688 		seq_printf(m, "NIX_AF_SMQ[%llu]_CFG =0x%llx\n", schq,
1689 			   rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)));
1690 		seq_printf(m, "NIX_AF_SMQ[%llu]_STATUS =0x%llx\n", schq,
1691 			   rvu_read64(rvu, blkaddr, NIX_AF_SMQX_STATUS(schq)));
1692 		seq_printf(m, "NIX_AF_MDQ[%llu]_OUT_MD_COUNT =0x%llx\n", schq,
1693 			   rvu_read64(rvu, blkaddr,
1694 				      NIX_AF_MDQX_OUT_MD_COUNT(schq)));
1695 		seq_printf(m, "NIX_AF_MDQ[%llu]_SCHEDULE =0x%llx\n", schq,
1696 			   rvu_read64(rvu, blkaddr,
1697 				      NIX_AF_MDQX_SCHEDULE(schq)));
1698 		seq_printf(m, "NIX_AF_MDQ[%llu]_SHAPE =0x%llx\n", schq,
1699 			   rvu_read64(rvu, blkaddr, NIX_AF_MDQX_SHAPE(schq)));
1700 		seq_printf(m, "NIX_AF_MDQ[%llu]_CIR =0x%llx\n", schq,
1701 			   rvu_read64(rvu, blkaddr, NIX_AF_MDQX_CIR(schq)));
1702 		seq_printf(m, "NIX_AF_MDQ[%llu]_PIR =0x%llx\n", schq,
1703 			   rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PIR(schq)));
1704 		seq_printf(m, "NIX_AF_MDQ[%llu]_SW_XOFF =0x%llx\n", schq,
1705 			   rvu_read64(rvu, blkaddr, NIX_AF_MDQX_SW_XOFF(schq)));
1706 		seq_printf(m, "NIX_AF_MDQ[%llu]_PARENT =0x%llx\n", schq,
1707 			   rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PARENT(schq)));
1708 		seq_puts(m, "\n");
1709 	}
1710 
1711 	if (lvl == NIX_TXSCH_LVL_TL4) {
1712 		seq_printf(m, "NIX_AF_TL4[%llu]_SDP_LINK_CFG =0x%llx\n", schq,
1713 			   rvu_read64(rvu, blkaddr,
1714 				      NIX_AF_TL4X_SDP_LINK_CFG(schq)));
1715 		seq_printf(m, "NIX_AF_TL4[%llu]_SCHEDULE =0x%llx\n", schq,
1716 			   rvu_read64(rvu, blkaddr,
1717 				      NIX_AF_TL4X_SCHEDULE(schq)));
1718 		seq_printf(m, "NIX_AF_TL4[%llu]_SHAPE =0x%llx\n", schq,
1719 			   rvu_read64(rvu, blkaddr, NIX_AF_TL4X_SHAPE(schq)));
1720 		seq_printf(m, "NIX_AF_TL4[%llu]_CIR =0x%llx\n", schq,
1721 			   rvu_read64(rvu, blkaddr, NIX_AF_TL4X_CIR(schq)));
1722 		seq_printf(m, "NIX_AF_TL4[%llu]_PIR =0x%llx\n", schq,
1723 			   rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PIR(schq)));
1724 		seq_printf(m, "NIX_AF_TL4[%llu]_SW_XOFF =0x%llx\n", schq,
1725 			   rvu_read64(rvu, blkaddr, NIX_AF_TL4X_SW_XOFF(schq)));
1726 		seq_printf(m, "NIX_AF_TL4[%llu]_TOPOLOGY =0x%llx\n", schq,
1727 			   rvu_read64(rvu, blkaddr,
1728 				      NIX_AF_TL4X_TOPOLOGY(schq)));
1729 		seq_printf(m, "NIX_AF_TL4[%llu]_PARENT =0x%llx\n", schq,
1730 			   rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PARENT(schq)));
1731 		seq_printf(m, "NIX_AF_TL4[%llu]_MD_DEBUG0 =0x%llx\n", schq,
1732 			   rvu_read64(rvu, blkaddr,
1733 				      NIX_AF_TL4X_MD_DEBUG0(schq)));
1734 		seq_printf(m, "NIX_AF_TL4[%llu]_MD_DEBUG1 =0x%llx\n", schq,
1735 			   rvu_read64(rvu, blkaddr,
1736 				      NIX_AF_TL4X_MD_DEBUG1(schq)));
1737 		seq_puts(m, "\n");
1738 	}
1739 
1740 	if (lvl == NIX_TXSCH_LVL_TL3) {
1741 		seq_printf(m, "NIX_AF_TL3[%llu]_SCHEDULE =0x%llx\n", schq,
1742 			   rvu_read64(rvu, blkaddr,
1743 				      NIX_AF_TL3X_SCHEDULE(schq)));
1744 		seq_printf(m, "NIX_AF_TL3[%llu]_SHAPE =0x%llx\n", schq,
1745 			   rvu_read64(rvu, blkaddr, NIX_AF_TL3X_SHAPE(schq)));
1746 		seq_printf(m, "NIX_AF_TL3[%llu]_CIR =0x%llx\n", schq,
1747 			   rvu_read64(rvu, blkaddr, NIX_AF_TL3X_CIR(schq)));
1748 		seq_printf(m, "NIX_AF_TL3[%llu]_PIR =0x%llx\n", schq,
1749 			   rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PIR(schq)));
1750 		seq_printf(m, "NIX_AF_TL3[%llu]_SW_XOFF =0x%llx\n", schq,
1751 			   rvu_read64(rvu, blkaddr, NIX_AF_TL3X_SW_XOFF(schq)));
1752 		seq_printf(m, "NIX_AF_TL3[%llu]_TOPOLOGY =0x%llx\n", schq,
1753 			   rvu_read64(rvu, blkaddr,
1754 				      NIX_AF_TL3X_TOPOLOGY(schq)));
1755 		seq_printf(m, "NIX_AF_TL3[%llu]_PARENT =0x%llx\n", schq,
1756 			   rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PARENT(schq)));
1757 		seq_printf(m, "NIX_AF_TL3[%llu]_MD_DEBUG0 =0x%llx\n", schq,
1758 			   rvu_read64(rvu, blkaddr,
1759 				      NIX_AF_TL3X_MD_DEBUG0(schq)));
1760 		seq_printf(m, "NIX_AF_TL3[%llu]_MD_DEBUG1 =0x%llx\n", schq,
1761 			   rvu_read64(rvu, blkaddr,
1762 				      NIX_AF_TL3X_MD_DEBUG1(schq)));
1763 
1764 		link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL)
1765 				& 0x01 ? NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1766 		if (lvl == link_level) {
1767 			seq_printf(m,
1768 				   "NIX_AF_TL3_TL2[%llu]_BP_STATUS =0x%llx\n",
1769 				   schq, rvu_read64(rvu, blkaddr,
1770 				   NIX_AF_TL3_TL2X_BP_STATUS(schq)));
1771 			for (link = 0; link < hw->cgx_links; link++)
1772 				seq_printf(m,
1773 					   "NIX_AF_TL3_TL2[%llu]_LINK[%d]_CFG =0x%llx\n",
1774 					   schq, link,
1775 					   rvu_read64(rvu, blkaddr,
1776 						      NIX_AF_TL3_TL2X_LINKX_CFG(schq, link)));
1777 		}
1778 		seq_puts(m, "\n");
1779 	}
1780 
1781 	if (lvl == NIX_TXSCH_LVL_TL2) {
1782 		seq_printf(m, "NIX_AF_TL2[%llu]_SHAPE =0x%llx\n", schq,
1783 			   rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SHAPE(schq)));
1784 		seq_printf(m, "NIX_AF_TL2[%llu]_CIR =0x%llx\n", schq,
1785 			   rvu_read64(rvu, blkaddr, NIX_AF_TL2X_CIR(schq)));
1786 		seq_printf(m, "NIX_AF_TL2[%llu]_PIR =0x%llx\n", schq,
1787 			   rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PIR(schq)));
1788 		seq_printf(m, "NIX_AF_TL2[%llu]_SW_XOFF =0x%llx\n", schq,
1789 			   rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SW_XOFF(schq)));
1790 		seq_printf(m, "NIX_AF_TL2[%llu]_TOPOLOGY =0x%llx\n", schq,
1791 			   rvu_read64(rvu, blkaddr,
1792 				      NIX_AF_TL2X_TOPOLOGY(schq)));
1793 		seq_printf(m, "NIX_AF_TL2[%llu]_PARENT =0x%llx\n", schq,
1794 			   rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PARENT(schq)));
1795 		seq_printf(m, "NIX_AF_TL2[%llu]_MD_DEBUG0 =0x%llx\n", schq,
1796 			   rvu_read64(rvu, blkaddr,
1797 				      NIX_AF_TL2X_MD_DEBUG0(schq)));
1798 		seq_printf(m, "NIX_AF_TL2[%llu]_MD_DEBUG1 =0x%llx\n", schq,
1799 			   rvu_read64(rvu, blkaddr,
1800 				      NIX_AF_TL2X_MD_DEBUG1(schq)));
1801 
1802 		link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL)
1803 				& 0x01 ? NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1804 		if (lvl == link_level) {
1805 			seq_printf(m,
1806 				   "NIX_AF_TL3_TL2[%llu]_BP_STATUS =0x%llx\n",
1807 				   schq, rvu_read64(rvu, blkaddr,
1808 				   NIX_AF_TL3_TL2X_BP_STATUS(schq)));
1809 			for (link = 0; link < hw->cgx_links; link++)
1810 				seq_printf(m,
1811 					   "NIX_AF_TL3_TL2[%llu]_LINK[%d]_CFG =0x%llx\n",
1812 					   schq, link, rvu_read64(rvu, blkaddr,
1813 					   NIX_AF_TL3_TL2X_LINKX_CFG(schq, link)));
1814 		}
1815 		seq_puts(m, "\n");
1816 	}
1817 
1818 	if (lvl == NIX_TXSCH_LVL_TL1) {
1819 		seq_printf(m, "NIX_AF_TX_LINK[%llu]_NORM_CREDIT =0x%llx\n",
1820 			   schq,
1821 			   rvu_read64(rvu, blkaddr,
1822 				      NIX_AF_TX_LINKX_NORM_CREDIT(schq)));
1823 		seq_printf(m, "NIX_AF_TX_LINK[%llu]_HW_XOFF =0x%llx\n", schq,
1824 			   rvu_read64(rvu, blkaddr,
1825 				      NIX_AF_TX_LINKX_HW_XOFF(schq)));
1826 		seq_printf(m, "NIX_AF_TL1[%llu]_SCHEDULE =0x%llx\n", schq,
1827 			   rvu_read64(rvu, blkaddr,
1828 				      NIX_AF_TL1X_SCHEDULE(schq)));
1829 		seq_printf(m, "NIX_AF_TL1[%llu]_SHAPE =0x%llx\n", schq,
1830 			   rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SHAPE(schq)));
1831 		seq_printf(m, "NIX_AF_TL1[%llu]_CIR =0x%llx\n", schq,
1832 			   rvu_read64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq)));
1833 		seq_printf(m, "NIX_AF_TL1[%llu]_SW_XOFF =0x%llx\n", schq,
1834 			   rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq)));
1835 		seq_printf(m, "NIX_AF_TL1[%llu]_TOPOLOGY =0x%llx\n", schq,
1836 			   rvu_read64(rvu, blkaddr,
1837 				      NIX_AF_TL1X_TOPOLOGY(schq)));
1838 		seq_printf(m, "NIX_AF_TL1[%llu]_MD_DEBUG0 =0x%llx\n", schq,
1839 			   rvu_read64(rvu, blkaddr,
1840 				      NIX_AF_TL1X_MD_DEBUG0(schq)));
1841 		seq_printf(m, "NIX_AF_TL1[%llu]_MD_DEBUG1 =0x%llx\n", schq,
1842 			   rvu_read64(rvu, blkaddr,
1843 				      NIX_AF_TL1X_MD_DEBUG1(schq)));
1844 		seq_printf(m, "NIX_AF_TL1[%llu]_DROPPED_PACKETS =0x%llx\n",
1845 			   schq,
1846 			   rvu_read64(rvu, blkaddr,
1847 				      NIX_AF_TL1X_DROPPED_PACKETS(schq)));
1848 		seq_printf(m, "NIX_AF_TL1[%llu]_DROPPED_BYTES =0x%llx\n", schq,
1849 			   rvu_read64(rvu, blkaddr,
1850 				      NIX_AF_TL1X_DROPPED_BYTES(schq)));
1851 		seq_printf(m, "NIX_AF_TL1[%llu]_RED_PACKETS =0x%llx\n", schq,
1852 			   rvu_read64(rvu, blkaddr,
1853 				      NIX_AF_TL1X_RED_PACKETS(schq)));
1854 		seq_printf(m, "NIX_AF_TL1[%llu]_RED_BYTES =0x%llx\n", schq,
1855 			   rvu_read64(rvu, blkaddr,
1856 				      NIX_AF_TL1X_RED_BYTES(schq)));
1857 		seq_printf(m, "NIX_AF_TL1[%llu]_YELLOW_PACKETS =0x%llx\n", schq,
1858 			   rvu_read64(rvu, blkaddr,
1859 				      NIX_AF_TL1X_YELLOW_PACKETS(schq)));
1860 		seq_printf(m, "NIX_AF_TL1[%llu]_YELLOW_BYTES =0x%llx\n", schq,
1861 			   rvu_read64(rvu, blkaddr,
1862 				      NIX_AF_TL1X_YELLOW_BYTES(schq)));
1863 		seq_printf(m, "NIX_AF_TL1[%llu]_GREEN_PACKETS =0x%llx\n", schq,
1864 			   rvu_read64(rvu, blkaddr,
1865 				      NIX_AF_TL1X_GREEN_PACKETS(schq)));
1866 		seq_printf(m, "NIX_AF_TL1[%llu]_GREEN_BYTES =0x%llx\n", schq,
1867 			   rvu_read64(rvu, blkaddr,
1868 				      NIX_AF_TL1X_GREEN_BYTES(schq)));
1869 		seq_puts(m, "\n");
1870 	}
1871 }
1872 
/* Dumps given tm_topo registers */
rvu_dbg_nix_tm_topo_display(struct seq_file * m,void * unused)1874 static int rvu_dbg_nix_tm_topo_display(struct seq_file *m, void *unused)
1875 {
1876 	struct nix_hw *nix_hw = m->private;
1877 	struct rvu *rvu = nix_hw->rvu;
1878 	struct nix_aq_enq_req aq_req;
1879 	struct nix_txsch *txsch;
1880 	int nixlf, lvl, schq;
1881 	u16 pcifunc;
1882 
1883 	nixlf = rvu->rvu_dbg.nix_tm_ctx.lf;
1884 
1885 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1886 		return -EINVAL;
1887 
1888 	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1889 	aq_req.hdr.pcifunc = pcifunc;
1890 	aq_req.ctype = NIX_AQ_CTYPE_SQ;
1891 	aq_req.op = NIX_AQ_INSTOP_READ;
1892 	seq_printf(m, "pcifunc is 0x%x\n", pcifunc);
1893 
1894 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1895 		txsch = &nix_hw->txsch[lvl];
1896 		for (schq = 0; schq < txsch->schq.max; schq++) {
1897 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) == pcifunc)
1898 				print_tm_topo(m, schq, lvl);
1899 		}
1900 	}
1901 	return 0;
1902 }
1903 
rvu_dbg_nix_tm_topo_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos)1904 static ssize_t rvu_dbg_nix_tm_topo_write(struct file *filp,
1905 					 const char __user *buffer,
1906 					 size_t count, loff_t *ppos)
1907 {
1908 	struct seq_file *m = filp->private_data;
1909 	struct nix_hw *nix_hw = m->private;
1910 	struct rvu *rvu = nix_hw->rvu;
1911 	struct rvu_pfvf *pfvf;
1912 	u16 pcifunc;
1913 	u64 nixlf;
1914 	int ret;
1915 
1916 	ret = kstrtoull_from_user(buffer, count, 10, &nixlf);
1917 	if (ret)
1918 		return ret;
1919 
1920 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1921 		return -EINVAL;
1922 
1923 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1924 	if (!pfvf->sq_ctx) {
1925 		dev_warn(rvu->dev, "SQ context is not initialized\n");
1926 		return -EINVAL;
1927 	}
1928 
1929 	rvu->rvu_dbg.nix_tm_ctx.lf = nixlf;
1930 	return count;
1931 }
1932 
/* Instantiate seq_file ops for the "nix_tm_topo" debugfs entry */
RVU_DEBUG_SEQ_FOPS(nix_tm_topo, nix_tm_topo_display, nix_tm_topo_write);
1934 
/* Dump a NIX send queue (SQ) hardware context from an AQ read response.
 * CN10K parts use a different context layout and are delegated to
 * print_nix_cn10k_sq_ctx(); the fields below follow the OcteonTx2
 * nix_sq_ctx_s layout, grouped by 64-bit context word (W0..W15).
 */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* CN10K silicon: different SQ context layout */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
		return;
	}
	seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
		   sq_ctx->sqe_way_mask, sq_ctx->cq);
	seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   sq_ctx->sdp_mcast, sq_ctx->substream);
	seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
		   sq_ctx->qint_idx, sq_ctx->ena);

	seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
		   sq_ctx->sqb_count, sq_ctx->default_chan);
	seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
		   sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
	seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
		   sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);

	seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
		   sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
	seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
		   sq_ctx->sq_int, sq_ctx->sqb_aura);
	seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);

	/* W3: scheduler/meta-descriptor queue state */
	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
		   sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
	seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
		   sq_ctx->smenq_offset, sq_ctx->tail_offset);
	seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
		   sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
	seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
		   sq_ctx->cq_limit, sq_ctx->max_sqe_size);

	/* W4..W8: SQ buffer (SQB) pointers */
	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	/* W9: VF LSO settings */
	seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
	seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
		   sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
	seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
		   sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
	seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);

	/* W10..W15: statistics counters */
	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
		   (u64)sq_ctx->scm_lso_rem);
	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_octs);
	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_pkts);
}
2004 
print_nix_cn10k_rq_ctx(struct seq_file * m,struct nix_cn10k_rq_ctx_s * rq_ctx)2005 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
2006 				   struct nix_cn10k_rq_ctx_s *rq_ctx)
2007 {
2008 	seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
2009 		   rq_ctx->ena, rq_ctx->sso_ena);
2010 	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
2011 		   rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
2012 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
2013 		   rq_ctx->cq, rq_ctx->lenerr_dis);
2014 	seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
2015 		   rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
2016 	seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
2017 		   rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
2018 	seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
2019 		   rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
2020 	seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
2021 
2022 	seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
2023 		   rq_ctx->spb_aura, rq_ctx->lpb_aura);
2024 	seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
2025 	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
2026 		   rq_ctx->sso_grp, rq_ctx->sso_tt);
2027 	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
2028 		   rq_ctx->pb_caching, rq_ctx->wqe_caching);
2029 	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
2030 		   rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
2031 	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
2032 		   rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
2033 	seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
2034 		   rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
2035 
2036 	seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
2037 	seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
2038 	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
2039 	seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
2040 		   rq_ctx->wqe_skip, rq_ctx->spb_ena);
2041 	seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
2042 		   rq_ctx->lpb_sizem1, rq_ctx->first_skip);
2043 	seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
2044 		   rq_ctx->later_skip, rq_ctx->xqe_imm_size);
2045 	seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
2046 		   rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
2047 
2048 	seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
2049 		   rq_ctx->xqe_drop, rq_ctx->xqe_pass);
2050 	seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
2051 		   rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
2052 	seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
2053 		   rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
2054 	seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
2055 		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
2056 
2057 	seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n",
2058 		   rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
2059 	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n",
2060 		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
2061 	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
2062 		   rq_ctx->rq_int, rq_ctx->rq_int_ena);
2063 	seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
2064 
2065 	seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
2066 		   rq_ctx->ltag, rq_ctx->good_utag);
2067 	seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
2068 		   rq_ctx->bad_utag, rq_ctx->flow_tagw);
2069 	seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
2070 		   rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
2071 	seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
2072 		   rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
2073 	seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
2074 
2075 	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
2076 	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
2077 	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
2078 	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
2079 	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
2080 }
2081 
/* Dump a NIX receive queue (RQ) context from an AQ read response.
 * CN10K parts use a different layout and are delegated to
 * print_nix_cn10k_rq_ctx(); fields below follow the OcteonTx2
 * nix_rq_ctx_s layout, grouped by 64-bit context word (W0..W10).
 */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* CN10K silicon: different RQ context layout */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
		return;
	}

	seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   rq_ctx->wqe_aura, rq_ctx->substream);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
		   rq_ctx->cq, rq_ctx->ena_wqwd);
	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
		   rq_ctx->ipsech_ena, rq_ctx->sso_ena);
	seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);

	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
		   rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
		   rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
		   rq_ctx->pb_caching, rq_ctx->sso_tt);
	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
		   rq_ctx->sso_grp, rq_ctx->lpb_aura);
	seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);

	seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
		   rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
	seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
		   rq_ctx->xqe_imm_size, rq_ctx->later_skip);
	seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
		   rq_ctx->first_skip, rq_ctx->lpb_sizem1);
	seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
		   rq_ctx->spb_ena, rq_ctx->wqe_skip);
	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);

	/* W3/W4: backpressure pass/drop thresholds per resource */
	seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
		   rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
	seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
		   rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
	seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
		   rq_ctx->xqe_pass, rq_ctx->xqe_drop);

	seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
		   rq_ctx->qint_idx, rq_ctx->rq_int_ena);
	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
		   rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
	seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);

	seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
		   rq_ctx->flow_tagw, rq_ctx->bad_utag);
	seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
		   rq_ctx->good_utag, rq_ctx->ltag);

	/* W6..W10: statistics counters */
	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
}
2150 
/* Dump a NIX completion queue (CQ) context from an AQ read response.
 * Shared between OcteonTx2 and CN10K; the CN10K-only fields (lbpid_*,
 * lbp_ena, lbp_frac, cpt_drop_err_en) are printed conditionally.
 */
static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);

	seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
	seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
		   cq_ctx->avg_con, cq_ctx->cint_idx);
	seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
		   cq_ctx->cq_err, cq_ctx->qint_idx);
	seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
		   cq_ctx->bpid, cq_ctx->bp_ena);

	/* CN10K only: local backpressure ID is split across three fields;
	 * also print the recombined value for convenience.
	 */
	if (!is_rvu_otx2(rvu)) {
		seq_printf(m, "W1: lbpid_high \t\t\t0x%03x\n", cq_ctx->lbpid_high);
		seq_printf(m, "W1: lbpid_med \t\t\t0x%03x\n", cq_ctx->lbpid_med);
		seq_printf(m, "W1: lbpid_low \t\t\t0x%03x\n", cq_ctx->lbpid_low);
		seq_printf(m, "(W1: lbpid) \t\t\t0x%03x\n",
			   cq_ctx->lbpid_high << 6 | cq_ctx->lbpid_med << 3 |
			   cq_ctx->lbpid_low);
		seq_printf(m, "W1: lbp_ena \t\t\t\t%d\n\n", cq_ctx->lbp_ena);
	}

	seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
		   cq_ctx->update_time, cq_ctx->avg_level);
	seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
		   cq_ctx->head, cq_ctx->tail);

	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
		   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
	seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
		   cq_ctx->qsize, cq_ctx->caching);
	seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
		   cq_ctx->substream, cq_ctx->ena);
	if (!is_rvu_otx2(rvu)) {
		seq_printf(m, "W3: lbp_frac \t\t\t%d\n", cq_ctx->lbp_frac);
		seq_printf(m, "W3: cpt_drop_err_en \t\t\t%d\n",
			   cq_ctx->cpt_drop_err_en);
	}
	seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
		   cq_ctx->drop_ena, cq_ctx->drop);
	seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
}
2198 
/* Common read handler for the nix_{cq,sq,rq}_ctx debugfs entries.
 * Reads the queue context(s) previously selected via the corresponding
 * write handler through the NIX admin queue and pretty-prints them.
 * Returns 0 on success, -EINVAL on bad selection or AQ read failure.
 */
static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
					 void *unused, int ctype)
{
	void (*print_nix_ctx)(struct seq_file *filp,
			      struct nix_aq_enq_rsp *rsp) = NULL;
	struct nix_hw *nix_hw = filp->private;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_aq_enq_req aq_req;
	struct nix_aq_enq_rsp rsp;
	char *ctype_string = NULL;
	int qidx, rc, max_id = 0;
	struct rvu_pfvf *pfvf;
	int nixlf, id, all;
	u16 pcifunc;

	/* Fetch the selection (LF, queue id, "all" flag) stored by the
	 * matching write handler.
	 */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
		id = rvu->rvu_dbg.nix_cq_ctx.id;
		all = rvu->rvu_dbg.nix_cq_ctx.all;
		break;

	case NIX_AQ_CTYPE_SQ:
		nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
		id = rvu->rvu_dbg.nix_sq_ctx.id;
		all = rvu->rvu_dbg.nix_sq_ctx.all;
		break;

	case NIX_AQ_CTYPE_RQ:
		nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
		id = rvu->rvu_dbg.nix_rq_ctx.id;
		all = rvu->rvu_dbg.nix_rq_ctx.all;
		break;

	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
		seq_puts(filp, "SQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
		seq_puts(filp, "RQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
		seq_puts(filp, "CQ context is not initialized\n");
		return -EINVAL;
	}

	/* Pick the per-type queue count and printer */
	if (ctype == NIX_AQ_CTYPE_SQ) {
		max_id = pfvf->sq_ctx->qsize;
		ctype_string = "sq";
		print_nix_ctx = print_nix_sq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		max_id = pfvf->rq_ctx->qsize;
		ctype_string = "rq";
		print_nix_ctx = print_nix_rq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		max_id = pfvf->cq_ctx->qsize;
		ctype_string = "cq";
		print_nix_ctx = print_nix_cq_ctx;
	}

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NIX_AQ_INSTOP_READ;
	/* "all": iterate [0, max_id); otherwise dump only queue 'id' */
	if (all)
		id = 0;
	else
		max_id = id + 1;
	for (qidx = id; qidx < max_id; qidx++) {
		aq_req.qidx = qidx;
		seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
			   ctype_string, nixlf, aq_req.qidx);
		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(filp, "Failed to read the context\n");
			return -EINVAL;
		}
		print_nix_ctx(filp, &rsp);
	}
	return 0;
}
2287 
/* Validate a queue-context selection parsed from a debugfs write and
 * store it in rvu->rvu_dbg for the matching read handler.
 * @all:   dump every queue of this type on the next read
 * @nixlf: NIX LF number, @id: queue index, @ctype: NIX_AQ_CTYPE_{CQ,SQ,RQ}
 * Returns 0 on success, -EINVAL on any validation failure.
 */
static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
			       int id, int ctype, char *ctype_string,
			       struct seq_file *m)
{
	struct nix_hw *nix_hw = m->private;
	struct rvu_pfvf *pfvf;
	int max_id = 0;
	u16 pcifunc;

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* The per-type context must be initialized before it can be dumped */
	if (ctype == NIX_AQ_CTYPE_SQ) {
		if (!pfvf->sq_ctx) {
			dev_warn(rvu->dev, "SQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->sq_ctx->qsize;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		if (!pfvf->rq_ctx) {
			dev_warn(rvu->dev, "RQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->rq_ctx->qsize;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		if (!pfvf->cq_ctx) {
			dev_warn(rvu->dev, "CQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->cq_ctx->qsize;
	}

	/* Note: an unknown ctype leaves max_id at 0, so any id fails here */
	if (id < 0 || id >= max_id) {
		dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
			 ctype_string, max_id - 1);
		return -EINVAL;
	}
	/* Record the selection for the read side */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_cq_ctx.id = id;
		rvu->rvu_dbg.nix_cq_ctx.all = all;
		break;

	case NIX_AQ_CTYPE_SQ:
		rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_sq_ctx.id = id;
		rvu->rvu_dbg.nix_sq_ctx.all = all;
		break;

	case NIX_AQ_CTYPE_RQ:
		rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_rq_ctx.id = id;
		rvu->rvu_dbg.nix_rq_ctx.all = all;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
2350 
rvu_dbg_nix_queue_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos,int ctype)2351 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
2352 					   const char __user *buffer,
2353 					   size_t count, loff_t *ppos,
2354 					   int ctype)
2355 {
2356 	struct seq_file *m = filp->private_data;
2357 	struct nix_hw *nix_hw = m->private;
2358 	struct rvu *rvu = nix_hw->rvu;
2359 	char *cmd_buf, *ctype_string;
2360 	int nixlf, id = 0, ret;
2361 	bool all = false;
2362 
2363 	if ((*ppos != 0) || !count)
2364 		return -EINVAL;
2365 
2366 	switch (ctype) {
2367 	case NIX_AQ_CTYPE_SQ:
2368 		ctype_string = "sq";
2369 		break;
2370 	case NIX_AQ_CTYPE_RQ:
2371 		ctype_string = "rq";
2372 		break;
2373 	case NIX_AQ_CTYPE_CQ:
2374 		ctype_string = "cq";
2375 		break;
2376 	default:
2377 		return -EINVAL;
2378 	}
2379 
2380 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
2381 
2382 	if (!cmd_buf)
2383 		return count;
2384 
2385 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
2386 				   &nixlf, &id, &all);
2387 	if (ret < 0) {
2388 		dev_info(rvu->dev,
2389 			 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
2390 			 ctype_string, ctype_string);
2391 		goto done;
2392 	} else {
2393 		ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
2394 					  ctype_string, m);
2395 	}
2396 done:
2397 	kfree(cmd_buf);
2398 	return ret ? ret : count;
2399 }
2400 
/* debugfs write: select the NIX LF/SQ(s) the next read will dump */
static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_SQ);
}

/* debugfs read: dump the selected SQ context(s) */
static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
}

RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
2415 
/* debugfs write: select the NIX LF/RQ(s) the next read will dump */
static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_RQ);
}

/* debugfs read: dump the selected RQ context(s) */
static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void  *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused,  NIX_AQ_CTYPE_RQ);
}

RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
2430 
/* debugfs write: select the NIX LF/CQ(s) the next read will dump */
static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_CQ);
}

/* debugfs read: dump the selected CQ context(s) */
static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
}

RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
2445 
print_nix_qctx_qsize(struct seq_file * filp,int qsize,unsigned long * bmap,char * qtype)2446 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
2447 				 unsigned long *bmap, char *qtype)
2448 {
2449 	char *buf;
2450 
2451 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2452 	if (!buf)
2453 		return;
2454 
2455 	bitmap_print_to_pagebuf(false, buf, bmap, qsize);
2456 	seq_printf(filp, "%s context count : %d\n", qtype, qsize);
2457 	seq_printf(filp, "%s context ena/dis bitmap : %s\n",
2458 		   qtype, buf);
2459 	kfree(buf);
2460 }
2461 
print_nix_qsize(struct seq_file * filp,struct rvu_pfvf * pfvf)2462 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
2463 {
2464 	if (!pfvf->cq_ctx)
2465 		seq_puts(filp, "cq context is not initialized\n");
2466 	else
2467 		print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
2468 				     "cq");
2469 
2470 	if (!pfvf->rq_ctx)
2471 		seq_puts(filp, "rq context is not initialized\n");
2472 	else
2473 		print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
2474 				     "rq");
2475 
2476 	if (!pfvf->sq_ctx)
2477 		seq_puts(filp, "sq context is not initialized\n");
2478 	else
2479 		print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
2480 				     "sq");
2481 }
2482 
/* debugfs write: select the NIX LF whose queue sizes the read shows */
static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
				   BLKTYPE_NIX);
}

/* debugfs read: show queue sizes/bitmaps for the selected NIX LF */
static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
}

RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
2497 
/* Pretty-print one NIX bandwidth-profile (ingress policer) context,
 * word by word (W0..W15), decoding enumerated fields into strings.
 * The "Wn:" prefixes mirror the hardware context-word layout.
 */
static void print_band_prof_ctx(struct seq_file *m,
				struct nix_bandprof_s *prof)
{
	char *str;

	/* pc_mode is a 2-bit field; the four cases below cover all values */
	switch (prof->pc_mode) {
	case NIX_RX_PC_MODE_VLAN:
		str = "VLAN";
		break;
	case NIX_RX_PC_MODE_DSCP:
		str = "DSCP";
		break;
	case NIX_RX_PC_MODE_GEN:
		str = "Generic";
		break;
	case NIX_RX_PC_MODE_RSVD:
		str = "Reserved";
		break;
	}
	seq_printf(m, "W0: pc_mode\t\t%s\n", str);
	str = (prof->icolor == 3) ? "Color blind" :
		(prof->icolor == 0) ? "Green" :
		(prof->icolor == 1) ? "Yellow" : "Red";
	seq_printf(m, "W0: icolor\t\t%s\n", str);
	seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
	seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
	seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
	seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
	seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
	seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
	seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
	seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);

	seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
	str = (prof->lmode == 0) ? "byte" : "packet";
	seq_printf(m, "W1: lmode\t\t%s\n", str);
	/* 'l_sellect' spelling comes from the hardware struct definition */
	seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
	seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
	seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
	seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
	str = (prof->gc_action == 0) ? "PASS" :
		(prof->gc_action == 1) ? "DROP" : "RED";
	seq_printf(m, "W1: gc_action\t\t%s\n", str);
	str = (prof->yc_action == 0) ? "PASS" :
		(prof->yc_action == 1) ? "DROP" : "RED";
	seq_printf(m, "W1: yc_action\t\t%s\n", str);
	str = (prof->rc_action == 0) ? "PASS" :
		(prof->rc_action == 1) ? "DROP" : "RED";
	seq_printf(m, "W1: rc_action\t\t%s\n", str);
	seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
	seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
	seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);

	seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
	seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
	seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
	seq_printf(m, "W4: green_pkt_pass\t%lld\n",
		   (u64)prof->green_pkt_pass);
	seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
		   (u64)prof->yellow_pkt_pass);
	seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
	seq_printf(m, "W7: green_octs_pass\t%lld\n",
		   (u64)prof->green_octs_pass);
	seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
		   (u64)prof->yellow_octs_pass);
	seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
	seq_printf(m, "W10: green_pkt_drop\t%lld\n",
		   (u64)prof->green_pkt_drop);
	seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
		   (u64)prof->yellow_pkt_drop);
	seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
	seq_printf(m, "W13: green_octs_drop\t%lld\n",
		   (u64)prof->green_octs_drop);
	seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
		   (u64)prof->yellow_octs_drop);
	seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
	seq_puts(m, "==============================\n");
}
2576 
/* debugfs 'ingress_policer_ctx' show handler: for every allocated bandwidth
 * profile in every valid policer layer, fetch the context from hardware via
 * the NIX admin queue and pretty-print it along with the owning PF/VF.
 */
static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
{
	struct nix_hw *nix_hw = m->private;
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_ipolicer *ipolicer;
	int layer, prof_idx, idx, rc;
	u16 pcifunc;
	char *str;

	/* Ingress policers do not exist on all platforms */
	if (!nix_hw->ipolicer)
		return 0;

	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
			(layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";

		seq_printf(m, "\n%s bandwidth profiles\n", str);
		seq_puts(m, "=======================\n");

		ipolicer = &nix_hw->ipolicer[layer];

		for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
			/* Skip profiles that are not allocated to anyone */
			if (is_rsrc_free(&ipolicer->band_prof, idx))
				continue;

			/* AQ profile index encodes the layer in bits 15:14 */
			prof_idx = (idx & 0x3FFF) | (layer << 14);
			rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
						 0x00, NIX_AQ_CTYPE_BANDPROF,
						 prof_idx);
			if (rc) {
				dev_err(rvu->dev,
					"%s: Failed to fetch context of %s profile %d, err %d\n",
					__func__, str, idx, rc);
				/* Stop dumping but keep the debugfs read OK */
				return 0;
			}
			seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
			pcifunc = ipolicer->pfvf_map[idx];
			if (!(pcifunc & RVU_PFVF_FUNC_MASK))
				seq_printf(m, "Allocated to :: PF %d\n",
					   rvu_get_pf(pcifunc));
			else
				seq_printf(m, "Allocated to :: PF %d VF %d\n",
					   rvu_get_pf(pcifunc),
					   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
			print_band_prof_ctx(m, &aq_rsp.prof);
		}
	}
	return 0;
}
2631 
2632 RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
2633 
rvu_dbg_nix_band_prof_rsrc_display(struct seq_file * m,void * unused)2634 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
2635 {
2636 	struct nix_hw *nix_hw = m->private;
2637 	struct nix_ipolicer *ipolicer;
2638 	int layer;
2639 	char *str;
2640 
2641 	/* Ingress policers do not exist on all platforms */
2642 	if (!nix_hw->ipolicer)
2643 		return 0;
2644 
2645 	seq_puts(m, "\nBandwidth profile resource free count\n");
2646 	seq_puts(m, "=====================================\n");
2647 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
2648 		if (layer == BAND_PROF_INVAL_LAYER)
2649 			continue;
2650 		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
2651 			(layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
2652 
2653 		ipolicer = &nix_hw->ipolicer[layer];
2654 		seq_printf(m, "%s :: Max: %4d  Free: %4d\n", str,
2655 			   ipolicer->band_prof.max,
2656 			   rvu_rsrc_free_count(&ipolicer->band_prof));
2657 	}
2658 	seq_puts(m, "=====================================\n");
2659 
2660 	return 0;
2661 }
2662 
2663 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
2664 
/* Create the per-NIX-block debugfs directory ("nix" or "nix1") and
 * populate it with all NIX debug entries.  No-op when the block is not
 * implemented on this silicon.
 */
static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
{
	struct nix_hw *nix_hw;

	if (!is_block_implemented(rvu->hw, blkaddr))
		return;

	if (blkaddr == BLKADDR_NIX0) {
		rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
		nix_hw = &rvu->hw->nix[0];
	} else {
		rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
						      rvu->rvu_dbg.root);
		nix_hw = &rvu->hw->nix[1];
	}

	debugfs_create_file("tm_tree", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_tm_tree_fops);
	debugfs_create_file("tm_topo", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_tm_topo_fops);
	debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_sq_ctx_fops);
	debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_rq_ctx_fops);
	debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_cq_ctx_fops);
	debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_ndc_tx_cache_fops);
	debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_ndc_rx_cache_fops);
	debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_ndc_tx_hits_miss_fops);
	debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_ndc_rx_hits_miss_fops);
	/* 'qsize' needs the block address, carried as the file's aux num */
	debugfs_create_file_aux_num("qsize", 0600, rvu->rvu_dbg.nix, rvu,
			    blkaddr, &rvu_dbg_nix_qsize_fops);
	debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_band_prof_ctx_fops);
	debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_band_prof_rsrc_fops);
}
2706 
rvu_dbg_npa_init(struct rvu * rvu)2707 static void rvu_dbg_npa_init(struct rvu *rvu)
2708 {
2709 	rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
2710 
2711 	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
2712 			    &rvu_dbg_npa_qsize_fops);
2713 	debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2714 			    &rvu_dbg_npa_aura_ctx_fops);
2715 	debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2716 			    &rvu_dbg_npa_pool_ctx_fops);
2717 	debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
2718 			    &rvu_dbg_npa_ndc_cache_fops);
2719 	debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
2720 			    &rvu_dbg_npa_ndc_hits_miss_fops);
2721 }
2722 
/* Fetch the cumulative NIX RX stat 'idx' for the current cgx/lmac and print
 * it as "name: value".  Relies on 'rvu', 'cgxd', 'lmac_id', 's' and 'err'
 * being in scope at the expansion site; the statement expression yields the
 * counter value (valid only when err == 0, which callers must check).
 */
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name)				\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					     NIX_STATS_RX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})

/* TX-direction counterpart of PRINT_CGX_CUML_NIXRX_STATUS. */
#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name)			\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					  NIX_STATS_TX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})
2742 
cgx_print_stats(struct seq_file * s,int lmac_id)2743 static int cgx_print_stats(struct seq_file *s, int lmac_id)
2744 {
2745 	struct cgx_link_user_info linfo;
2746 	struct mac_ops *mac_ops;
2747 	void *cgxd = s->private;
2748 	u64 ucast, mcast, bcast;
2749 	int stat = 0, err = 0;
2750 	u64 tx_stat, rx_stat;
2751 	struct rvu *rvu;
2752 
2753 	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2754 					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2755 	if (!rvu)
2756 		return -ENODEV;
2757 
2758 	mac_ops = get_mac_ops(cgxd);
2759 	/* There can be no CGX devices at all */
2760 	if (!mac_ops)
2761 		return 0;
2762 
2763 	/* Link status */
2764 	seq_puts(s, "\n=======Link Status======\n\n");
2765 	err = cgx_get_link_info(cgxd, lmac_id, &linfo);
2766 	if (err)
2767 		seq_puts(s, "Failed to read link status\n");
2768 	seq_printf(s, "\nLink is %s %d Mbps\n\n",
2769 		   linfo.link_up ? "UP" : "DOWN", linfo.speed);
2770 
2771 	/* Rx stats */
2772 	seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
2773 		   mac_ops->name);
2774 	ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
2775 	if (err)
2776 		return err;
2777 	mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
2778 	if (err)
2779 		return err;
2780 	bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
2781 	if (err)
2782 		return err;
2783 	seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
2784 	PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
2785 	if (err)
2786 		return err;
2787 	PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
2788 	if (err)
2789 		return err;
2790 	PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
2791 	if (err)
2792 		return err;
2793 
2794 	/* Tx stats */
2795 	seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
2796 		   mac_ops->name);
2797 	ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
2798 	if (err)
2799 		return err;
2800 	mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
2801 	if (err)
2802 		return err;
2803 	bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
2804 	if (err)
2805 		return err;
2806 	seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
2807 	PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
2808 	if (err)
2809 		return err;
2810 	PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
2811 	if (err)
2812 		return err;
2813 
2814 	/* Rx stats */
2815 	seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
2816 	while (stat < mac_ops->rx_stats_cnt) {
2817 		err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
2818 		if (err)
2819 			return err;
2820 		if (is_rvu_otx2(rvu))
2821 			seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
2822 				   rx_stat);
2823 		else
2824 			seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
2825 				   rx_stat);
2826 		stat++;
2827 	}
2828 
2829 	/* Tx stats */
2830 	stat = 0;
2831 	seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
2832 	while (stat < mac_ops->tx_stats_cnt) {
2833 		err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
2834 		if (err)
2835 			return err;
2836 
2837 		if (is_rvu_otx2(rvu))
2838 			seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
2839 				   tx_stat);
2840 		else
2841 			seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
2842 				   tx_stat);
2843 		stat++;
2844 	}
2845 
2846 	return err;
2847 }
2848 
/* Recover the LMAC id stored as the debugfs file's aux number at creation
 * time (see debugfs_create_file_aux_num() in rvu_dbg_cgx_init()).
 */
static int rvu_dbg_derive_lmacid(struct seq_file *s)
{
	return debugfs_get_aux_num(s->file);
}
2853 
/* debugfs 'stats' show handler for a cgx/lmac pair. */
static int rvu_dbg_cgx_stat_display(struct seq_file *s, void *unused)
{
	return cgx_print_stats(s, rvu_dbg_derive_lmacid(s));
}
2858 
2859 RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
2860 
/* Dump the DMAC filter configuration (broadcast/multicast accept modes,
 * filter mode and enabled DMAC CAM entries) for one LMAC.
 */
static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
{
	struct pci_dev *pdev = NULL;
	void *cgxd = s->private;
	char *bcast, *mcast;
	u16 index, domain;
	u8 dmac[ETH_ALEN];
	struct rvu *rvu;
	u64 cfg, mac;
	int pf;

	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
	if (!rvu)
		return -ENODEV;

	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
	/* NOTE(review): PCI domain hard-coded to 2 here, and PF devices
	 * assumed at devfn (pf + 1, 0) — presumably the fixed RVU PF
	 * enumeration on this silicon; verify against platform docs.
	 */
	domain = 2;

	pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
	if (!pdev)
		return 0;

	cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
	bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
	mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";

	seq_puts(s,
		 "PCI dev       RVUPF   BROADCAST  MULTICAST  FILTER-MODE\n");
	seq_printf(s, "%s  PF%d  %9s  %9s",
		   dev_name(&pdev->dev), pf, bcast, mcast);
	if (cfg & CGX_DMAC_CAM_ACCEPT)
		seq_printf(s, "%12s\n\n", "UNICAST");
	else
		seq_printf(s, "%16s\n\n", "PROMISCUOUS");

	seq_puts(s, "\nDMAC-INDEX  ADDRESS\n");

	/* Walk all 32 DMAC CAM entries of this CGX */
	for (index = 0 ; index < 32 ; index++) {
		cfg = cgx_read_dmac_entry(cgxd, index);
		/* Display enabled dmac entries associated with current lmac */
		if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
		    FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
			mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
			u64_to_ether_addr(mac, dmac);
			seq_printf(s, "%7d     %pM\n", index, dmac);
		}
	}

	/* Drop the reference taken by pci_get_domain_bus_and_slot() */
	pci_dev_put(pdev);
	return 0;
}
2913 
/* debugfs 'mac_filter' show handler for a cgx/lmac pair. */
static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *s, void *unused)
{
	return cgx_print_dmac_flt(s, rvu_dbg_derive_lmacid(s));
}
2918 
2919 RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
2920 
rvu_dbg_cgx_init(struct rvu * rvu)2921 static void rvu_dbg_cgx_init(struct rvu *rvu)
2922 {
2923 	struct mac_ops *mac_ops;
2924 	unsigned long lmac_bmap;
2925 	int i, lmac_id;
2926 	char dname[20];
2927 	void *cgx;
2928 
2929 	if (!cgx_get_cgxcnt_max())
2930 		return;
2931 
2932 	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
2933 	if (!mac_ops)
2934 		return;
2935 
2936 	rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
2937 						   rvu->rvu_dbg.root);
2938 
2939 	for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
2940 		cgx = rvu_cgx_pdata(i, rvu);
2941 		if (!cgx)
2942 			continue;
2943 		lmac_bmap = cgx_get_lmac_bmap(cgx);
2944 		/* cgx debugfs dir */
2945 		sprintf(dname, "%s%d", mac_ops->name, i);
2946 		rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
2947 						      rvu->rvu_dbg.cgx_root);
2948 
2949 		for_each_set_bit(lmac_id, &lmac_bmap, rvu->hw->lmac_per_cgx) {
2950 			/* lmac debugfs dir */
2951 			sprintf(dname, "lmac%d", lmac_id);
2952 			rvu->rvu_dbg.lmac =
2953 				debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
2954 
2955 			debugfs_create_file_aux_num("stats", 0600, rvu->rvu_dbg.lmac,
2956 					    cgx, lmac_id, &rvu_dbg_cgx_stat_fops);
2957 			debugfs_create_file_aux_num("mac_filter", 0600,
2958 					    rvu->rvu_dbg.lmac, cgx, lmac_id,
2959 					    &rvu_dbg_cgx_dmac_flt_fops);
2960 		}
2961 	}
2962 }
2963 
2964 /* NPC debugfs APIs */
rvu_print_npc_mcam_info(struct seq_file * s,u16 pcifunc,int blkaddr)2965 static void rvu_print_npc_mcam_info(struct seq_file *s,
2966 				    u16 pcifunc, int blkaddr)
2967 {
2968 	struct rvu *rvu = s->private;
2969 	int entry_acnt, entry_ecnt;
2970 	int cntr_acnt, cntr_ecnt;
2971 
2972 	rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
2973 					  &entry_acnt, &entry_ecnt);
2974 	rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
2975 					    &cntr_acnt, &cntr_ecnt);
2976 	if (!entry_acnt && !cntr_acnt)
2977 		return;
2978 
2979 	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
2980 		seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
2981 			   rvu_get_pf(pcifunc));
2982 	else
2983 		seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
2984 			   rvu_get_pf(pcifunc),
2985 			   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2986 
2987 	if (entry_acnt) {
2988 		seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
2989 		seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
2990 	}
2991 	if (cntr_acnt) {
2992 		seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
2993 		seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
2994 	}
2995 }
2996 
/* debugfs 'mcam_info' show handler: dump global NPC MCAM geometry
 * (keywidths, entry/counter totals) followed by the per-PF/VF allocation
 * summary.  Holds mcam->lock while reading the allocation state.
 */
static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
{
	struct rvu *rvu = filp->private;
	int pf, vf, numvfs, blkaddr;
	struct npc_mcam *mcam;
	u16 pcifunc, counters;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return -ENODEV;

	mcam = &rvu->hw->mcam;
	counters = rvu->hw->npc_counters;

	seq_puts(filp, "\nNPC MCAM info:\n");
	/* MCAM keywidth on receive and transmit sides */
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
	cfg = (cfg >> 32) & 0x07;
	seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
		   "224bits" : "448bits"));
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
	cfg = (cfg >> 32) & 0x07;
	seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
		   "224bits" : "448bits"));

	mutex_lock(&mcam->lock);
	/* MCAM entries */
	seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
	seq_printf(filp, "\t\t Reserved \t: %d\n",
		   mcam->total_entries - mcam->bmap_entries);
	seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);

	/* MCAM counters */
	seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
	seq_printf(filp, "\t\t Reserved \t: %d\n",
		   counters - mcam->counters.max);
	seq_printf(filp, "\t\t Available \t: %d\n",
		   rvu_rsrc_free_count(&mcam->counters));

	/* Free count equals total allocatable: nothing is allocated yet,
	 * so skip the per-function walk entirely.
	 */
	if (mcam->bmap_entries == mcam->bmap_fcnt) {
		mutex_unlock(&mcam->lock);
		return 0;
	}

	seq_puts(filp, "\n\t\t Current allocation\n");
	seq_puts(filp, "\t\t====================\n");
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		pcifunc = (pf << RVU_PFVF_PF_SHIFT);
		rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);

		/* NOTE(review): bits 19:12 of RVU_PRIV_PFX_CFG hold this
		 * PF's VF count - confirm against the register spec.
		 */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		numvfs = (cfg >> 12) & 0xFF;
		for (vf = 0; vf < numvfs; vf++) {
			pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
			rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
		}
	}

	mutex_unlock(&mcam->lock);
	return 0;
}
3061 
3062 RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
3063 
/* debugfs 'rx_miss_act_stats' show handler: print the match-stat counter
 * associated with the NPC RX miss action.
 */
static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
					     void *unused)
{
	struct rvu *rvu = filp->private;
	struct npc_mcam *mcam;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return -ENODEV;

	mcam = &rvu->hw->mcam;

	seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
	seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
		   rvu_read64(rvu, blkaddr,
			      NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));

	return 0;
}
3084 
3085 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
3086 
/* Print the TTL field of one MPLS label-stack entry, value then mask.
 * Each argument is evaluated exactly once, so no typeof locals are needed
 * here (unlike the LBTCBOS variant below, which reuses its arguments).
 * 's' (the seq_file) must be in scope at the expansion site.
 */
#define RVU_DBG_PRINT_MPLS_TTL(pkt, mask)                                     \
do {									      \
	seq_printf(s, "%ld ", FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, pkt));     \
	seq_printf(s, "mask 0x%lx\n",                                         \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, mask));               \
} while (0)                                                                   \

/* Print the label/TC/BOS fields of one MPLS label-stack entry, values then
 * masks.  Arguments are captured in typeof locals because each is used
 * three times; 's' must be in scope at the expansion site.
 */
#define RVU_DBG_PRINT_MPLS_LBTCBOS(_pkt, _mask)                               \
do {									      \
	typeof(_pkt) (pkt) = (_pkt);					      \
	typeof(_mask) (mask) = (_mask);                                       \
	seq_printf(s, "%ld %ld %ld\n",                                        \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, pkt),                  \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, pkt),                  \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, pkt));                \
	seq_printf(s, "\tmask 0x%lx 0x%lx 0x%lx\n",                           \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, mask),                 \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, mask),                 \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, mask));               \
} while (0)                                                                   \
3107 
rvu_dbg_npc_mcam_show_flows(struct seq_file * s,struct rvu_npc_mcam_rule * rule)3108 static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
3109 					struct rvu_npc_mcam_rule *rule)
3110 {
3111 	u8 bit;
3112 
3113 	for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
3114 		seq_printf(s, "\t%s  ", npc_get_field_name(bit));
3115 		switch (bit) {
3116 		case NPC_LXMB:
3117 			if (rule->lxmb == 1)
3118 				seq_puts(s, "\tL2M nibble is set\n");
3119 			else
3120 				seq_puts(s, "\tL2B nibble is set\n");
3121 			break;
3122 		case NPC_DMAC:
3123 			seq_printf(s, "%pM ", rule->packet.dmac);
3124 			seq_printf(s, "mask %pM\n", rule->mask.dmac);
3125 			break;
3126 		case NPC_SMAC:
3127 			seq_printf(s, "%pM ", rule->packet.smac);
3128 			seq_printf(s, "mask %pM\n", rule->mask.smac);
3129 			break;
3130 		case NPC_ETYPE:
3131 			seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
3132 			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
3133 			break;
3134 		case NPC_OUTER_VID:
3135 			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
3136 			seq_printf(s, "mask 0x%x\n",
3137 				   ntohs(rule->mask.vlan_tci));
3138 			break;
3139 		case NPC_INNER_VID:
3140 			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_itci));
3141 			seq_printf(s, "mask 0x%x\n",
3142 				   ntohs(rule->mask.vlan_itci));
3143 			break;
3144 		case NPC_TOS:
3145 			seq_printf(s, "%d ", rule->packet.tos);
3146 			seq_printf(s, "mask 0x%x\n", rule->mask.tos);
3147 			break;
3148 		case NPC_SIP_IPV4:
3149 			seq_printf(s, "%pI4 ", &rule->packet.ip4src);
3150 			seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
3151 			break;
3152 		case NPC_DIP_IPV4:
3153 			seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
3154 			seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
3155 			break;
3156 		case NPC_SIP_IPV6:
3157 			seq_printf(s, "%pI6 ", rule->packet.ip6src);
3158 			seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
3159 			break;
3160 		case NPC_DIP_IPV6:
3161 			seq_printf(s, "%pI6 ", rule->packet.ip6dst);
3162 			seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
3163 			break;
3164 		case NPC_IPFRAG_IPV6:
3165 			seq_printf(s, "0x%x ", rule->packet.next_header);
3166 			seq_printf(s, "mask 0x%x\n", rule->mask.next_header);
3167 			break;
3168 		case NPC_IPFRAG_IPV4:
3169 			seq_printf(s, "0x%x ", rule->packet.ip_flag);
3170 			seq_printf(s, "mask 0x%x\n", rule->mask.ip_flag);
3171 			break;
3172 		case NPC_SPORT_TCP:
3173 		case NPC_SPORT_UDP:
3174 		case NPC_SPORT_SCTP:
3175 			seq_printf(s, "%d ", ntohs(rule->packet.sport));
3176 			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
3177 			break;
3178 		case NPC_DPORT_TCP:
3179 		case NPC_DPORT_UDP:
3180 		case NPC_DPORT_SCTP:
3181 			seq_printf(s, "%d ", ntohs(rule->packet.dport));
3182 			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
3183 			break;
3184 		case NPC_TCP_FLAGS:
3185 			seq_printf(s, "%d ", rule->packet.tcp_flags);
3186 			seq_printf(s, "mask 0x%x\n", rule->mask.tcp_flags);
3187 			break;
3188 		case NPC_IPSEC_SPI:
3189 			seq_printf(s, "0x%x ", ntohl(rule->packet.spi));
3190 			seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi));
3191 			break;
3192 		case NPC_MPLS1_LBTCBOS:
3193 			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[0],
3194 						   rule->mask.mpls_lse[0]);
3195 			break;
3196 		case NPC_MPLS1_TTL:
3197 			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[0],
3198 					       rule->mask.mpls_lse[0]);
3199 			break;
3200 		case NPC_MPLS2_LBTCBOS:
3201 			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[1],
3202 						   rule->mask.mpls_lse[1]);
3203 			break;
3204 		case NPC_MPLS2_TTL:
3205 			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[1],
3206 					       rule->mask.mpls_lse[1]);
3207 			break;
3208 		case NPC_MPLS3_LBTCBOS:
3209 			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[2],
3210 						   rule->mask.mpls_lse[2]);
3211 			break;
3212 		case NPC_MPLS3_TTL:
3213 			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[2],
3214 					       rule->mask.mpls_lse[2]);
3215 			break;
3216 		case NPC_MPLS4_LBTCBOS:
3217 			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[3],
3218 						   rule->mask.mpls_lse[3]);
3219 			break;
3220 		case NPC_MPLS4_TTL:
3221 			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[3],
3222 					       rule->mask.mpls_lse[3]);
3223 			break;
3224 		case NPC_TYPE_ICMP:
3225 			seq_printf(s, "%d ", rule->packet.icmp_type);
3226 			seq_printf(s, "mask 0x%x\n", rule->mask.icmp_type);
3227 			break;
3228 		case NPC_CODE_ICMP:
3229 			seq_printf(s, "%d ", rule->packet.icmp_code);
3230 			seq_printf(s, "mask 0x%x\n", rule->mask.icmp_code);
3231 			break;
3232 		default:
3233 			seq_puts(s, "\n");
3234 			break;
3235 		}
3236 	}
3237 }
3238 
rvu_dbg_npc_mcam_show_action(struct seq_file * s,struct rvu_npc_mcam_rule * rule)3239 static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
3240 					 struct rvu_npc_mcam_rule *rule)
3241 {
3242 	if (is_npc_intf_tx(rule->intf)) {
3243 		switch (rule->tx_action.op) {
3244 		case NIX_TX_ACTIONOP_DROP:
3245 			seq_puts(s, "\taction: Drop\n");
3246 			break;
3247 		case NIX_TX_ACTIONOP_UCAST_DEFAULT:
3248 			seq_puts(s, "\taction: Unicast to default channel\n");
3249 			break;
3250 		case NIX_TX_ACTIONOP_UCAST_CHAN:
3251 			seq_printf(s, "\taction: Unicast to channel %d\n",
3252 				   rule->tx_action.index);
3253 			break;
3254 		case NIX_TX_ACTIONOP_MCAST:
3255 			seq_puts(s, "\taction: Multicast\n");
3256 			break;
3257 		case NIX_TX_ACTIONOP_DROP_VIOL:
3258 			seq_puts(s, "\taction: Lockdown Violation Drop\n");
3259 			break;
3260 		default:
3261 			break;
3262 		}
3263 	} else {
3264 		switch (rule->rx_action.op) {
3265 		case NIX_RX_ACTIONOP_DROP:
3266 			seq_puts(s, "\taction: Drop\n");
3267 			break;
3268 		case NIX_RX_ACTIONOP_UCAST:
3269 			seq_printf(s, "\taction: Direct to queue %d\n",
3270 				   rule->rx_action.index);
3271 			break;
3272 		case NIX_RX_ACTIONOP_RSS:
3273 			seq_puts(s, "\taction: RSS\n");
3274 			break;
3275 		case NIX_RX_ACTIONOP_UCAST_IPSEC:
3276 			seq_puts(s, "\taction: Unicast ipsec\n");
3277 			break;
3278 		case NIX_RX_ACTIONOP_MCAST:
3279 			seq_puts(s, "\taction: Multicast\n");
3280 			break;
3281 		default:
3282 			break;
3283 		}
3284 	}
3285 }
3286 
rvu_dbg_get_intf_name(int intf)3287 static const char *rvu_dbg_get_intf_name(int intf)
3288 {
3289 	switch (intf) {
3290 	case NIX_INTFX_RX(0):
3291 		return "NIX0_RX";
3292 	case NIX_INTFX_RX(1):
3293 		return "NIX1_RX";
3294 	case NIX_INTFX_TX(0):
3295 		return "NIX0_TX";
3296 	case NIX_INTFX_TX(1):
3297 		return "NIX1_TX";
3298 	default:
3299 		break;
3300 	}
3301 
3302 	return "unknown";
3303 }
3304 
/* debugfs 'mcam_rules' show handler: walk the software list of installed
 * MCAM rules (under mcam->lock) and print, for each rule, its owner,
 * interface, match fields, action, enable state and hit counter.
 */
static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
{
	struct rvu_npc_mcam_rule *iter;
	struct rvu *rvu = s->private;
	struct npc_mcam *mcam;
	int pf, vf = -1;
	bool enabled;
	int blkaddr;
	u16 target;
	u64 hits;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return 0;

	mcam = &rvu->hw->mcam;

	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		/* Installer: decode PF (and VF, if any) from the owner's
		 * pcifunc.
		 */
		pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
		seq_printf(s, "\n\tInstalled by: PF%d ", pf);

		if (iter->owner & RVU_PFVF_FUNC_MASK) {
			vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
			seq_printf(s, "VF%d", vf);
		}
		seq_puts(s, "\n");

		seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
						    "RX" : "TX");
		seq_printf(s, "\tinterface: %s\n",
			   rvu_dbg_get_intf_name(iter->intf));
		seq_printf(s, "\tmcam entry: %d\n", iter->entry);

		rvu_dbg_npc_mcam_show_flows(s, iter);
		if (is_npc_intf_rx(iter->intf)) {
			/* RX rules also carry a forwarding target + channel */
			target = iter->rx_action.pf_func;
			pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
			seq_printf(s, "\tForward to: PF%d ", pf);

			if (target & RVU_PFVF_FUNC_MASK) {
				vf = (target & RVU_PFVF_FUNC_MASK) - 1;
				seq_printf(s, "VF%d", vf);
			}
			seq_puts(s, "\n");
			seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
			seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
		}

		rvu_dbg_npc_mcam_show_action(s, iter);

		enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
		seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");

		/* Hit stats only exist for rules with an attached counter */
		if (!iter->has_cntr)
			continue;
		seq_printf(s, "\tcounter: %d\n", iter->cntr);

		hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
		seq_printf(s, "\thits: %lld\n", hits);
	}
	mutex_unlock(&mcam->lock);

	return 0;
}
3370 
3371 RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
3372 
/* Dump the NPC exact-match tables to debugfs ("npc/exact_entries"):
 * first the hashed MEM table (one column per way, walked depth-first),
 * then the CAM table holding entries that could not be placed in the
 * MEM table. Always returns 0 (seq_file show convention).
 */
static int rvu_dbg_npc_exact_show_entries(struct seq_file *s, void *unused)
{
	/* Per-way cursor into each way's entry list */
	struct npc_exact_table_entry *mem_entry[NPC_EXACT_TBL_MAX_WAYS] = { 0 };
	struct npc_exact_table_entry *cam_entry;
	struct npc_exact_table *table;
	struct rvu *rvu = s->private;
	int i, j;

	/* Bit j set => way j has a valid entry at the current depth index */
	u8 bitmap = 0;

	table = rvu->hw->table;

	/* table->lock serializes against entry add/remove while dumping */
	mutex_lock(&table->lock);

	/* Check if there is at least one entry in mem table */
	if (!table->mem_tbl_entry_cnt)
		goto dump_cam_table;

	/* Print table headers */
	seq_puts(s, "\n\tExact Match MEM Table\n");
	seq_puts(s, "Index\t");

	for (i = 0; i < table->mem_table.ways; i++) {
		mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i],
							struct npc_exact_table_entry, list);

		seq_printf(s, "Way-%d\t\t\t\t\t", i);
	}

	seq_puts(s, "\n");
	for (i = 0; i < table->mem_table.ways; i++)
		seq_puts(s, "\tChan  MAC                     \t");

	seq_puts(s, "\n\n");

	/* Print mem table entries; assumes each way's list is kept sorted
	 * by index so the per-way cursor only ever needs to advance -
	 * TODO confirm against the insertion path.
	 *
	 * NOTE(review): after the last entry of a way, list_next_entry()
	 * yields the list head cast to an entry, and the ->index check
	 * below then reads outside a real entry. Looks fragile - confirm
	 * it cannot spuriously match a valid index.
	 */
	for (i = 0; i < table->mem_table.depth; i++) {
		bitmap = 0;
		for (j = 0; j < table->mem_table.ways; j++) {
			if (!mem_entry[j])
				continue;

			if (mem_entry[j]->index != i)
				continue;

			bitmap |= BIT(j);
		}

		/* No valid entries */
		if (!bitmap)
			continue;

		seq_printf(s, "%d\t", i);
		for (j = 0; j < table->mem_table.ways; j++) {
			if (!(bitmap & BIT(j))) {
				seq_puts(s, "nil\t\t\t\t\t");
				continue;
			}

			seq_printf(s, "0x%x %pM\t\t\t", mem_entry[j]->chan,
				   mem_entry[j]->mac);
			mem_entry[j] = list_next_entry(mem_entry[j], list);
		}
		seq_puts(s, "\n");
	}

dump_cam_table:

	if (!table->cam_tbl_entry_cnt)
		goto done;

	seq_puts(s, "\n\tExact Match CAM Table\n");
	seq_puts(s, "index\tchan\tMAC\n");

	/* Traverse cam table entries */
	list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) {
		seq_printf(s, "%d\t0x%x\t%pM\n", cam_entry->index, cam_entry->chan,
			   cam_entry->mac);
	}

done:
	mutex_unlock(&table->lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_exact_entries, npc_exact_show_entries, NULL);
3459 
rvu_dbg_npc_exact_show_info(struct seq_file * s,void * unused)3460 static int rvu_dbg_npc_exact_show_info(struct seq_file *s, void *unused)
3461 {
3462 	struct npc_exact_table *table;
3463 	struct rvu *rvu = s->private;
3464 	int i;
3465 
3466 	table = rvu->hw->table;
3467 
3468 	seq_puts(s, "\n\tExact Table Info\n");
3469 	seq_printf(s, "Exact Match Feature : %s\n",
3470 		   rvu->hw->cap.npc_exact_match_enabled ? "enabled" : "disable");
3471 	if (!rvu->hw->cap.npc_exact_match_enabled)
3472 		return 0;
3473 
3474 	seq_puts(s, "\nMCAM Index\tMAC Filter Rules Count\n");
3475 	for (i = 0; i < table->num_drop_rules; i++)
3476 		seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]);
3477 
3478 	seq_puts(s, "\nMcam Index\tPromisc Mode Status\n");
3479 	for (i = 0; i < table->num_drop_rules; i++)
3480 		seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? "on" : "off");
3481 
3482 	seq_puts(s, "\n\tMEM Table Info\n");
3483 	seq_printf(s, "Ways : %d\n", table->mem_table.ways);
3484 	seq_printf(s, "Depth : %d\n", table->mem_table.depth);
3485 	seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask);
3486 	seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask);
3487 	seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset);
3488 
3489 	seq_puts(s, "\n\tCAM Table Info\n");
3490 	seq_printf(s, "Depth : %d\n", table->cam_table.depth);
3491 
3492 	return 0;
3493 }
3494 
3495 RVU_DEBUG_SEQ_FOPS(npc_exact_info, npc_exact_show_info, NULL);
3496 
rvu_dbg_npc_exact_drop_cnt(struct seq_file * s,void * unused)3497 static int rvu_dbg_npc_exact_drop_cnt(struct seq_file *s, void *unused)
3498 {
3499 	struct npc_exact_table *table;
3500 	struct rvu *rvu = s->private;
3501 	struct npc_key_field *field;
3502 	u16 chan, pcifunc;
3503 	int blkaddr, i;
3504 	u64 cfg, cam1;
3505 	char *str;
3506 
3507 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3508 	table = rvu->hw->table;
3509 
3510 	field = &rvu->hw->mcam.rx_key_fields[NPC_CHAN];
3511 
3512 	seq_puts(s, "\n\t Exact Hit on drop status\n");
3513 	seq_puts(s, "\npcifunc\tmcam_idx\tHits\tchan\tstatus\n");
3514 
3515 	for (i = 0; i < table->num_drop_rules; i++) {
3516 		pcifunc = rvu_npc_exact_drop_rule_to_pcifunc(rvu, i);
3517 		cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(i, 0));
3518 
3519 		/* channel will be always in keyword 0 */
3520 		cam1 = rvu_read64(rvu, blkaddr,
3521 				  NPC_AF_MCAMEX_BANKX_CAMX_W0(i, 0, 1));
3522 		chan = field->kw_mask[0] & cam1;
3523 
3524 		str = (cfg & 1) ? "enabled" : "disabled";
3525 
3526 		seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc, i,
3527 			   rvu_read64(rvu, blkaddr,
3528 				      NPC_AF_MATCH_STATX(table->counter_idx[i])),
3529 			   chan, str);
3530 	}
3531 
3532 	return 0;
3533 }
3534 
3535 RVU_DEBUG_SEQ_FOPS(npc_exact_drop_cnt, npc_exact_drop_cnt, NULL);
3536 
rvu_dbg_npc_init(struct rvu * rvu)3537 static void rvu_dbg_npc_init(struct rvu *rvu)
3538 {
3539 	rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
3540 
3541 	debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
3542 			    &rvu_dbg_npc_mcam_info_fops);
3543 	debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
3544 			    &rvu_dbg_npc_mcam_rules_fops);
3545 
3546 	debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
3547 			    &rvu_dbg_npc_rx_miss_act_fops);
3548 
3549 	if (!rvu->hw->cap.npc_exact_match_enabled)
3550 		return;
3551 
3552 	debugfs_create_file("exact_entries", 0444, rvu->rvu_dbg.npc, rvu,
3553 			    &rvu_dbg_npc_exact_entries_fops);
3554 
3555 	debugfs_create_file("exact_info", 0444, rvu->rvu_dbg.npc, rvu,
3556 			    &rvu_dbg_npc_exact_info_fops);
3557 
3558 	debugfs_create_file("exact_drop_cnt", 0444, rvu->rvu_dbg.npc, rvu,
3559 			    &rvu_dbg_npc_exact_drop_cnt_fops);
3560 
3561 }
3562 
/* Print the free/busy status bitmaps for all CPT engines of the given
 * type (@eng_type: CPT_AE_TYPE, CPT_SE_TYPE or CPT_IE_TYPE).
 * Global engine numbering is SEs first, then IEs, then AEs, with the
 * per-type counts taken from CPT_AF_CONSTANTS1.
 * Returns 0 on success, -EINVAL for an unknown engine type.
 */
static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
{
	struct cpt_ctx *ctx = filp->private;
	u64 busy_sts = 0, free_sts = 0;
	u32 e_min = 0, e_max = 0, e, i;
	u16 max_ses, max_ies, max_aes;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg;

	/* CPT_AF_CONSTANTS1 layout used here: [15:0] SE count,
	 * [31:16] IE count, [47:32] AE count.
	 */
	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
	max_ses = reg & 0xffff;
	max_ies = (reg >> 16) & 0xffff;
	max_aes = (reg >> 32) & 0xffff;

	/* Select the [e_min, e_max) engine index range for this type */
	switch (eng_type) {
	case CPT_AE_TYPE:
		e_min = max_ses + max_ies;
		e_max = max_ses + max_ies + max_aes;
		break;
	case CPT_SE_TYPE:
		e_min = 0;
		e_max = max_ses;
		break;
	case CPT_IE_TYPE:
		e_min = max_ses;
		e_max = max_ses + max_ies;
		break;
	default:
		return -EINVAL;
	}

	/* Collect one bit per engine: STS bit 0 -> busy, bit 1 -> free.
	 * Assumes at most 64 engines per type fit the u64 bitmaps -
	 * TODO confirm against hardware limits.
	 */
	for (e = e_min, i = 0; e < e_max; e++, i++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
		if (reg & 0x1)
			busy_sts |= 1ULL << i;

		if (reg & 0x2)
			free_sts |= 1ULL << i;
	}
	seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
	seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);

	return 0;
}
3608 
/* debugfs show handler: free/busy status of the AE engine group */
static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_AE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
3615 
/* debugfs show handler: free/busy status of the SE engine group */
static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_SE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
3622 
/* debugfs show handler: free/busy status of the IE engine group */
static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_IE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
3629 
/* Dump per-engine group-enable mask, active info and control registers
 * for every CPT engine (all SE, IE and AE engines) via debugfs
 * ("cpt_engines_info"). Always returns 0.
 */
static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	u16 max_ses, max_ies, max_aes;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u32 e_max, e;
	u64 reg;

	/* Per-type engine counts from CPT_AF_CONSTANTS1:
	 * [15:0] SEs, [31:16] IEs, [47:32] AEs.
	 */
	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
	max_ses = reg & 0xffff;
	max_ies = (reg >> 16) & 0xffff;
	max_aes = (reg >> 32) & 0xffff;

	/* Total engine count across all types */
	e_max = max_ses + max_ies + max_aes;

	seq_puts(filp, "===========================================\n");
	for (e = 0; e < e_max; e++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
		seq_printf(filp, "CPT Engine[%u] Group Enable   0x%02llx\n", e,
			   reg & 0xff);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
		seq_printf(filp, "CPT Engine[%u] Active Info    0x%llx\n", e,
			   reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
		seq_printf(filp, "CPT Engine[%u] Control        0x%llx\n", e,
			   reg);
		seq_puts(filp, "===========================================\n");
	}
	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
3663 
/* Dump the control/config registers of every LF slot of this CPT block
 * via debugfs ("cpt_lfs_info"). Returns -ENODEV when the block has no
 * LF bitmap (i.e. the block was never initialized), 0 otherwise.
 */
static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	int blkaddr = ctx->blkaddr;
	struct rvu *rvu = ctx->rvu;
	struct rvu_block *block;
	struct rvu_hwinfo *hw;
	u64 reg;
	u32 lf;

	hw = rvu->hw;
	block = &hw->block[blkaddr];
	if (!block->lf.bmap)
		return -ENODEV;

	seq_puts(filp, "===========================================\n");
	for (lf = 0; lf < block->lf.max; lf++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
		seq_printf(filp, "CPT Lf[%u] CTL          0x%llx\n", lf, reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
		seq_printf(filp, "CPT Lf[%u] CTL2         0x%llx\n", lf, reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
		seq_printf(filp, "CPT Lf[%u] PTR_CTL      0x%llx\n", lf, reg);
		/* Per-LF config register address = base | (lf << shift) */
		reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
				(lf << block->lfshift));
		seq_printf(filp, "CPT Lf[%u] CFG          0x%llx\n", lf, reg);
		seq_puts(filp, "===========================================\n");
	}
	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
3696 
/* Dump CPT fault/error related registers (FLT/PSN interrupt status,
 * RVU/RAS interrupts and execution error info) via debugfs
 * ("cpt_err_info"). Always returns 0.
 */
static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg0, reg1;

	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
	seq_printf(filp, "CPT_AF_FLTX_INT:       0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
	seq_printf(filp, "CPT_AF_PSNX_EXE:       0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
	seq_printf(filp, "CPT_AF_PSNX_LF:        0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
	seq_printf(filp, "CPT_AF_RVU_INT:        0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
	seq_printf(filp, "CPT_AF_RAS_INT:        0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
	seq_printf(filp, "CPT_AF_EXE_ERR_INFO:   0x%llx\n", reg0);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
3723 
/* Dump CPT performance counters (instruction request/latency, NCB read
 * request/latency, UC-fill reads, active cycles and clock count) via
 * debugfs ("cpt_pc"). Always returns 0.
 */
static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg;

	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
	seq_printf(filp, "CPT instruction requests   %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
	seq_printf(filp, "CPT instruction latency    %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
	seq_printf(filp, "CPT NCB read requests      %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
	seq_printf(filp, "CPT NCB read latency       %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
	seq_printf(filp, "CPT read requests caused by UC fills   %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
	seq_printf(filp, "CPT active cycles pc       %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
	seq_printf(filp, "CPT clock count pc         %llu\n", reg);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
3750 
rvu_dbg_cpt_init(struct rvu * rvu,int blkaddr)3751 static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
3752 {
3753 	struct cpt_ctx *ctx;
3754 
3755 	if (!is_block_implemented(rvu->hw, blkaddr))
3756 		return;
3757 
3758 	if (blkaddr == BLKADDR_CPT0) {
3759 		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
3760 		ctx = &rvu->rvu_dbg.cpt_ctx[0];
3761 		ctx->blkaddr = BLKADDR_CPT0;
3762 		ctx->rvu = rvu;
3763 	} else {
3764 		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
3765 						      rvu->rvu_dbg.root);
3766 		ctx = &rvu->rvu_dbg.cpt_ctx[1];
3767 		ctx->blkaddr = BLKADDR_CPT1;
3768 		ctx->rvu = rvu;
3769 	}
3770 
3771 	debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
3772 			    &rvu_dbg_cpt_pc_fops);
3773 	debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3774 			    &rvu_dbg_cpt_ae_sts_fops);
3775 	debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3776 			    &rvu_dbg_cpt_se_sts_fops);
3777 	debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3778 			    &rvu_dbg_cpt_ie_sts_fops);
3779 	debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
3780 			    &rvu_dbg_cpt_engines_info_fops);
3781 	debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
3782 			    &rvu_dbg_cpt_lfs_info_fops);
3783 	debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
3784 			    &rvu_dbg_cpt_err_info_fops);
3785 }
3786 
/* Pick the debugfs root directory name: "octeontx2" for OcteonTx2
 * silicon, "cn10k" otherwise.
 */
static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
{
	return is_rvu_otx2(rvu) ? "octeontx2" : "cn10k";
}
3794 
rvu_dbg_init(struct rvu * rvu)3795 void rvu_dbg_init(struct rvu *rvu)
3796 {
3797 	rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
3798 
3799 	debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
3800 			    &rvu_dbg_rsrc_status_fops);
3801 
3802 	if (!is_rvu_otx2(rvu))
3803 		debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
3804 				    rvu, &rvu_dbg_lmtst_map_table_fops);
3805 
3806 	if (!cgx_get_cgxcnt_max())
3807 		goto create;
3808 
3809 	if (is_rvu_otx2(rvu))
3810 		debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
3811 				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
3812 	else
3813 		debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
3814 				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
3815 
3816 create:
3817 	rvu_dbg_npa_init(rvu);
3818 	rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
3819 
3820 	rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
3821 	rvu_dbg_cgx_init(rvu);
3822 	rvu_dbg_npc_init(rvu);
3823 	rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
3824 	rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
3825 	rvu_dbg_mcs_init(rvu);
3826 }
3827 
/* Remove the entire debugfs tree created by rvu_dbg_init() */
void rvu_dbg_exit(struct rvu *rvu)
{
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}
3832 
3833 #endif /* CONFIG_DEBUG_FS */
3834