1 /* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
2 /* Copyright 2017-2019 NXP */
3 
4 #include <linux/timer.h>
5 #include <linux/pci.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/skbuff.h>
10 #include <linux/ethtool.h>
11 #include <linux/if_vlan.h>
12 #include <linux/phylink.h>
13 #include <linux/dim.h>
14 #include <net/xdp.h>
15 
16 #include "enetc_hw.h"
17 #include "enetc4_hw.h"
18 
19 #define ENETC_MAC_MAXFRM_SIZE	9600
20 #define ENETC_MAX_MTU		(ENETC_MAC_MAXFRM_SIZE - \
21 				(ETH_FCS_LEN + ETH_HLEN + VLAN_HLEN))
22 
23 #define ENETC_CBD_DATA_MEM_ALIGN 64
24 
/* Tx software BD: driver-side bookkeeping kept in parallel with each
 * hardware Tx BD, used to unmap and release the buffer on completion.
 */
struct enetc_tx_swbd {
	union {
		struct sk_buff *skb;		/* regular stack Tx */
		struct xdp_frame *xdp_frame;	/* XDP Tx (see is_xdp_* flags) */
	};
	dma_addr_t dma;
	struct page *page;	/* valid only if is_xdp_tx */
	u16 page_offset;	/* valid only if is_xdp_tx */
	u16 len;
	enum dma_data_direction dir;	/* direction used for the DMA unmap */
	u8 is_dma_page:1;
	u8 check_wb:1;		/* NOTE(review): presumably "check writeback on clean" — confirm */
	u8 do_twostep_tstamp:1;
	u8 is_eof:1;		/* last BD of the frame */
	u8 is_xdp_tx:1;
	u8 is_xdp_redirect:1;
	u8 qbv_en:1;
};
43 
/* Per-frame LSO (large send offload) context */
struct enetc_lso_t {
	bool	ipv6;		/* L3 is IPv6 (else IPv4) */
	bool	tcp;		/* L4 is TCP (else presumably UDP — confirm) */
	u8	l3_hdr_len;	/* L3 header length in bytes */
	u8	hdr_len; /* LSO header length */
	u8	l3_start;	/* offset of the L3 header in the frame */
	u16	lso_seg_size;	/* payload bytes per emitted segment */
	int	total_len; /* total data length, not include LSO header */
};
53 
54 #define ENETC_LSO_MAX_DATA_LEN		SZ_256K
55 
/* Rx buffer layout: half-page (2 KiB) buffers, with headroom reserved
 * either for the stack (NET_SKB_PAD) or for XDP (XDP_PACKET_HEADROOM).
 */
#define ENETC_RX_MAXFRM_SIZE	ENETC_MAC_MAXFRM_SIZE
#define ENETC_RXB_TRUESIZE	2048 /* PAGE_SIZE >> 1 on 4K-page systems */
#define ENETC_RXB_PAD		NET_SKB_PAD /* add extra space if needed */
#define ENETC_RXB_DMA_SIZE	\
	(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD)
#define ENETC_RXB_DMA_SIZE_XDP	\
	(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - XDP_PACKET_HEADROOM)
63 
/* Rx software BD: tracks the page buffer backing one hardware Rx BD */
struct enetc_rx_swbd {
	dma_addr_t dma;		/* DMA address of the buffer */
	struct page *page;
	u16 page_offset;	/* offset of this buffer within @page */
	enum dma_data_direction dir;
	u16 len;
};
71 
/* ENETC overhead: optional extension BD + 1 BD gap */
#define ENETC_TXBDS_NEEDED(val)	((val) + 2)
/* For LS1028A, max # of chained Tx BDs is 15, including head and
 * extension BD.
 */
#define ENETC_MAX_SKB_FRAGS	13
/* For ENETC v4 and later versions, max # of chained Tx BDs is 63,
 * including head and extension BD, but the range of MAX_SKB_FRAGS
 * is 17 ~ 45, so set ENETC4_MAX_SKB_FRAGS to MAX_SKB_FRAGS.
 */
#define ENETC4_MAX_SKB_FRAGS		MAX_SKB_FRAGS
/* worst case: all fragments, plus the linear head, plus ENETC overhead */
#define ENETC_TXBDS_MAX_NEEDED(x)	ENETC_TXBDS_NEEDED((x) + 1)
84 
/* Per-ring counters; the xdp_* and recycle counters are only
 * meaningful for rings involved in XDP processing.
 */
struct enetc_ring_stats {
	unsigned int packets;
	unsigned int bytes;
	unsigned int rx_alloc_errs;	/* Rx buffer allocation failures */
	unsigned int xdp_drops;
	unsigned int xdp_tx;
	unsigned int xdp_tx_drops;
	unsigned int xdp_redirect;
	unsigned int xdp_redirect_failures;
	unsigned int recycles;
	unsigned int recycle_failures;
	unsigned int win_drop;	/* NOTE(review): meaning not evident here — confirm against HW docs */
};
98 
/* Per-Rx-ring XDP state */
struct enetc_xdp_data {
	struct xdp_rxq_info rxq;
	struct bpf_prog *prog;		/* attached XDP program */
	int xdp_tx_in_flight;		/* XDP_TX buffers not yet completed */
};
104 
105 #define ENETC_RX_RING_DEFAULT_SIZE	2048
106 #define ENETC_TX_RING_DEFAULT_SIZE	2048
107 #define ENETC_DEFAULT_TX_WORK		(ENETC_TX_RING_DEFAULT_SIZE / 2)
108 
/* Memory backing one BD ring, kept separate from struct enetc_bdr so it
 * can be allocated and torn down independently of the live ring state.
 */
struct enetc_bdr_resource {
	/* Input arguments saved for teardown */
	struct device *dev; /* for DMA mapping */
	size_t bd_count;
	size_t bd_size;

	/* Resource proper */
	void *bd_base; /* points to Rx or Tx BD ring */
	dma_addr_t bd_dma_base;
	union {
		struct enetc_tx_swbd *tx_swbd;	/* Tx ring */
		struct enetc_rx_swbd *rx_swbd;	/* Rx ring */
	};
	char *tso_headers;
	dma_addr_t tso_headers_dma;
};
125 
/* Buffer descriptor (BD) ring state for one Tx or Rx queue.  Which side
 * of each union is valid depends on the ring direction.
 */
struct enetc_bdr {
	struct device *dev; /* for DMA mapping */
	struct net_device *ndev;
	void *bd_base; /* points to Rx or Tx BD ring */
	union {
		void __iomem *tpir;	/* Tx: producer index register */
		void __iomem *rcir;	/* Rx: consumer index register */
	};
	u16 index;
	u16 prio;
	int bd_count; /* # of BDs */
	int next_to_use;
	int next_to_clean;
	union {
		struct enetc_tx_swbd *tx_swbd;
		struct enetc_rx_swbd *rx_swbd;
	};
	union {
		void __iomem *tcir; /* Tx */
		int next_to_alloc; /* Rx */
	};
	void __iomem *idr; /* Interrupt Detect Register pointer */

	int buffer_offset;	/* presumably Rx headroom in front of frame data — confirm */
	struct enetc_xdp_data xdp;

	struct enetc_ring_stats stats;

	dma_addr_t bd_dma_base;
	u8 tsd_enable; /* Time specific departure */
	bool ext_en; /* enable h/w descriptor extensions */

	/* DMA buffer for TSO headers */
	char *tso_headers;
	dma_addr_t tso_headers_dma;
} ____cacheline_aligned_in_smp;
162 
enetc_bdr_idx_inc(struct enetc_bdr * bdr,int * i)163 static inline void enetc_bdr_idx_inc(struct enetc_bdr *bdr, int *i)
164 {
165 	if (unlikely(++*i == bdr->bd_count))
166 		*i = 0;
167 }
168 
enetc_bd_unused(struct enetc_bdr * bdr)169 static inline int enetc_bd_unused(struct enetc_bdr *bdr)
170 {
171 	if (bdr->next_to_clean > bdr->next_to_use)
172 		return bdr->next_to_clean - bdr->next_to_use - 1;
173 
174 	return bdr->bd_count + bdr->next_to_clean - bdr->next_to_use - 1;
175 }
176 
enetc_swbd_unused(struct enetc_bdr * bdr)177 static inline int enetc_swbd_unused(struct enetc_bdr *bdr)
178 {
179 	if (bdr->next_to_clean > bdr->next_to_alloc)
180 		return bdr->next_to_clean - bdr->next_to_alloc - 1;
181 
182 	return bdr->bd_count + bdr->next_to_clean - bdr->next_to_alloc - 1;
183 }
184 
185 /* Control BD ring */
186 #define ENETC_CBDR_DEFAULT_SIZE	64
/* Control BD ring bookkeeping and its hardware register pointers */
struct enetc_cbdr {
	void *bd_base; /* points to Rx or Tx BD ring */
	void __iomem *pir;	/* producer index register */
	void __iomem *cir;	/* consumer index register */
	void __iomem *mr; /* mode register */

	int bd_count; /* # of BDs */
	int next_to_use;
	int next_to_clean;

	dma_addr_t bd_dma_base;
	struct device *dma_dev;	/* device used for DMA mappings of CBD data */
};
200 
201 #define ENETC_TXBD(BDR, i) (&(((union enetc_tx_bd *)((BDR).bd_base))[i]))
202 
enetc_rxbd(struct enetc_bdr * rx_ring,int i)203 static inline union enetc_rx_bd *enetc_rxbd(struct enetc_bdr *rx_ring, int i)
204 {
205 	int hw_idx = i;
206 
207 	if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) && rx_ring->ext_en)
208 		hw_idx = 2 * i;
209 
210 	return &(((union enetc_rx_bd *)rx_ring->bd_base)[hw_idx]);
211 }
212 
enetc_rxbd_next(struct enetc_bdr * rx_ring,union enetc_rx_bd ** old_rxbd,int * old_index)213 static inline void enetc_rxbd_next(struct enetc_bdr *rx_ring,
214 				   union enetc_rx_bd **old_rxbd, int *old_index)
215 {
216 	union enetc_rx_bd *new_rxbd = *old_rxbd;
217 	int new_index = *old_index;
218 
219 	new_rxbd++;
220 
221 	if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) && rx_ring->ext_en)
222 		new_rxbd++;
223 
224 	if (unlikely(++new_index == rx_ring->bd_count)) {
225 		new_rxbd = rx_ring->bd_base;
226 		new_index = 0;
227 	}
228 
229 	*old_rxbd = new_rxbd;
230 	*old_index = new_index;
231 }
232 
enetc_rxbd_ext(union enetc_rx_bd * rxbd)233 static inline union enetc_rx_bd *enetc_rxbd_ext(union enetc_rx_bd *rxbd)
234 {
235 	return ++rxbd;
236 }
237 
/* Software bookkeeping for a DMA-coherent message buffer */
struct enetc_msg_swbd {
	void *vaddr;		/* CPU virtual address */
	dma_addr_t dma;		/* DMA handle of the same buffer */
	int size;		/* buffer size in bytes */
};
243 
244 #define ENETC_REV1	0x1
/* Hardware erratum workaround flags, stored as a bitmask in
 * struct enetc_si::errata.
 */
enum enetc_errata {
	ENETC_ERR_VLAN_ISOL	= BIT(0),
	ENETC_ERR_UCMCSWP	= BIT(1),
};
249 
250 #define ENETC_SI_F_PSFP BIT(0)
251 #define ENETC_SI_F_QBV  BIT(1)
252 #define ENETC_SI_F_QBU  BIT(2)
253 #define ENETC_SI_F_LSO	BIT(3)
254 
/* Per-SoC/revision driver parameters, matched to the device at probe */
struct enetc_drvdata {
	u32 pmac_offset; /* Only valid for PSI which supports 802.1Qbu */
	u8 tx_csum:1;		/* presumably: Tx checksum offload supported — confirm */
	u8 max_frags;		/* max # of BDs for fragments (cf. ENETC_MAX_SKB_FRAGS) */
	u64 sysclk_freq;	/* NETC system clock frequency, Hz */
	const struct ethtool_ops *eth_ops;
};
262 
/* Maps a (revision, device id) pair to its enetc_drvdata */
struct enetc_platform_info {
	u16 revision;
	u16 dev_id;
	const struct enetc_drvdata *data;
};
268 
269 /* PCI IEP device data */
struct enetc_si {
	struct pci_dev *pdev;
	struct enetc_hw hw;		/* register block accessors */
	enum enetc_errata errata;	/* active erratum workarounds */

	struct net_device *ndev; /* back ref. */

	struct enetc_cbdr cbd_ring;	/* control BD ring for this SI */

	int num_rx_rings; /* how many rings are available in the SI */
	int num_tx_rings;
	int num_fs_entries;	/* # of flow steering entries */
	int num_rss; /* number of RSS buckets */
	unsigned short pad;
	u16 revision;
	int hw_features;
	const struct enetc_drvdata *drvdata;
};
288 
289 #define ENETC_SI_ALIGN	32
290 
is_enetc_rev1(struct enetc_si * si)291 static inline bool is_enetc_rev1(struct enetc_si *si)
292 {
293 	return si->pdev->revision == ENETC_REV1;
294 }
295 
enetc_si_priv(const struct enetc_si * si)296 static inline void *enetc_si_priv(const struct enetc_si *si)
297 {
298 	return (char *)si + ALIGN(sizeof(struct enetc_si), ENETC_SI_ALIGN);
299 }
300 
enetc_si_is_pf(struct enetc_si * si)301 static inline bool enetc_si_is_pf(struct enetc_si *si)
302 {
303 	return !!(si->hw.port);
304 }
305 
enetc_pf_to_port(struct pci_dev * pf_pdev)306 static inline int enetc_pf_to_port(struct pci_dev *pf_pdev)
307 {
308 	switch (pf_pdev->devfn) {
309 	case 0:
310 		return 0;
311 	case 1:
312 		return 1;
313 	case 2:
314 		return 2;
315 	case 6:
316 		return 3;
317 	default:
318 		return -1;
319 	}
320 }
321 
322 #define ENETC_MAX_NUM_TXQS	8
323 #define ENETC_INT_NAME_MAX	(IFNAMSIZ + 8)
324 
/* State for one MSI-X vector: a single NAPI context servicing one Rx
 * ring and count_tx_rings Tx rings.
 */
struct enetc_int_vector {
	void __iomem *rbier;		/* Rx BD interrupt enable register */
	void __iomem *tbier_base;	/* base of Tx BD interrupt enable regs */
	void __iomem *ricr1;
	unsigned long tx_rings_map;	/* bitmap of Tx ring indices served */
	int count_tx_rings;
	u32 rx_ictt;			/* Rx interrupt coalescing time */
	u16 comp_cnt;
	bool rx_dim_en, rx_napi_work;
	struct napi_struct napi ____cacheline_aligned_in_smp;
	struct dim rx_dim ____cacheline_aligned_in_smp;
	char name[ENETC_INT_NAME_MAX];	/* irq name */

	struct enetc_bdr rx_ring;
	struct enetc_bdr tx_ring[] __counted_by(count_tx_rings);
} ____cacheline_aligned_in_smp;
341 
/* One ethtool flow-classification rule slot */
struct enetc_cls_rule {
	struct ethtool_rx_flow_spec fs;
	int used;	/* non-zero when this slot holds an installed rule */
};
346 
347 #define ENETC_MAX_BDR_INT	6 /* fixed to max # of available cpus */
/* PSFP (802.1Qci) hardware capability limits; populated by
 * enetc_get_max_cap() from the port capability registers.
 */
struct psfp_cap {
	u32 max_streamid;
	u32 max_psfp_filter;
	u32 max_psfp_gate;
	u32 max_psfp_gatelist;
	u32 max_psfp_meter;
};
355 
356 #define ENETC_F_TX_TSTAMP_MASK	0xff
/* Bitmask of currently enabled offloads (priv->active_offloads) */
enum enetc_active_offloads {
	/* 8 bits reserved for TX timestamp types (hwtstamp_tx_types) */
	ENETC_F_TX_TSTAMP		= BIT(0),
	ENETC_F_TX_ONESTEP_SYNC_TSTAMP	= BIT(1),

	ENETC_F_RX_TSTAMP		= BIT(8),
	ENETC_F_QBV			= BIT(9),	/* 802.1Qbv time-aware shaping */
	ENETC_F_QCI			= BIT(10),	/* 802.1Qci PSFP */
	ENETC_F_QBU			= BIT(11),	/* 802.1Qbu frame preemption */
	ENETC_F_TXCSUM			= BIT(12),
	ENETC_F_LSO			= BIT(13),
};
369 
/* Bit numbers for the priv->flags bitmap */
enum enetc_flags_bit {
	ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS = 0,
	ENETC_TX_DOWN,
};
374 
375 /* interrupt coalescing modes */
enum enetc_ic_mode {
	/* one interrupt per frame */
	ENETC_IC_NONE = 0,
	/* activated when int coalescing time is set to a non-0 value */
	ENETC_IC_RX_MANUAL = BIT(0),
	ENETC_IC_TX_MANUAL = BIT(1),
	/* use dynamic interrupt moderation (cf. struct dim rx_dim) */
	ENETC_IC_RX_ADAPTIVE = BIT(2),
};
385 
386 #define ENETC_RXIC_PKTTHR	min_t(u32, 256, ENETC_RX_RING_DEFAULT_SIZE / 2)
387 #define ENETC_TXIC_PKTTHR	min_t(u32, 128, ENETC_TX_RING_DEFAULT_SIZE / 2)
388 
/* Driver-private per-interface state */
struct enetc_ndev_priv {
	struct net_device *ndev;
	struct device *dev; /* dma-mapping device */
	struct enetc_si *si;	/* owning station interface */

	int bdr_int_num; /* number of Rx/Tx ring interrupts */
	struct enetc_int_vector *int_vector[ENETC_MAX_BDR_INT];
	u16 num_rx_rings, num_tx_rings;
	u16 rx_bd_count, tx_bd_count;	/* BDs per ring */

	u16 msg_enable;		/* netif message level bitmap */

	u8 preemptible_tcs;	/* bitmap of preemptible traffic classes */
	u8 max_frags; /* The maximum number of BDs for fragments */

	enum enetc_active_offloads active_offloads;

	u32 speed; /* store speed for compare update pspeed */

	struct enetc_bdr **xdp_tx_ring;
	struct enetc_bdr *tx_ring[16];
	struct enetc_bdr *rx_ring[16];
	const struct enetc_bdr_resource *tx_res;
	const struct enetc_bdr_resource *rx_res;

	struct enetc_cls_rule *cls_rules;

	struct psfp_cap psfp_cap;

	/* Minimum number of TX queues required by the network stack */
	unsigned int min_num_stack_tx_queues;

	struct phylink *phylink;
	int ic_mode;		/* enum enetc_ic_mode bits */
	u32 tx_ictt;		/* Tx interrupt coalescing time */

	struct bpf_prog *xdp_prog;

	unsigned long flags;	/* enetc_flags_bit bitmap */

	struct work_struct	tx_onestep_tstamp;
	struct sk_buff_head	tx_skbs;

	/* Serialize access to MAC Merge state between ethtool requests
	 * and link state updates
	 */
	struct mutex		mm_lock;

	struct clk *ref_clk; /* RGMII/RMII reference clock */
	u64 sysclk_freq; /* NETC system clock frequency */
};
440 
441 /* Messaging */
442 
443 /* VF-PF set primary MAC address message format */
struct enetc_msg_cmd_set_primary_mac {
	struct enetc_msg_cmd_header header;
	struct sockaddr mac;	/* presumably sa_data carries the MAC — confirm with VF driver */
};
448 
449 #define ENETC_CBD(R, i)	(&(((struct enetc_cbd *)((R).bd_base))[i]))
450 
451 #define ENETC_CBDR_TIMEOUT	1000 /* usecs */
452 
453 /* PTP driver exports */
454 extern int enetc_phc_index;
455 
456 /* SI common */
457 u32 enetc_port_mac_rd(struct enetc_si *si, u32 reg);
458 void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val);
459 int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv);
460 void enetc_pci_remove(struct pci_dev *pdev);
461 int enetc_alloc_msix(struct enetc_ndev_priv *priv);
462 void enetc_free_msix(struct enetc_ndev_priv *priv);
463 void enetc_get_si_caps(struct enetc_si *si);
464 void enetc_init_si_rings_params(struct enetc_ndev_priv *priv);
465 int enetc_alloc_si_resources(struct enetc_ndev_priv *priv);
466 void enetc_free_si_resources(struct enetc_ndev_priv *priv);
467 int enetc_configure_si(struct enetc_ndev_priv *priv);
468 int enetc_get_driver_data(struct enetc_si *si);
469 
470 int enetc_open(struct net_device *ndev);
471 int enetc_close(struct net_device *ndev);
472 void enetc_start(struct net_device *ndev);
473 void enetc_stop(struct net_device *ndev);
474 netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
475 struct net_device_stats *enetc_get_stats(struct net_device *ndev);
476 void enetc_set_features(struct net_device *ndev, netdev_features_t features);
477 int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
478 int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data);
479 void enetc_reset_tc_mqprio(struct net_device *ndev);
480 int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
481 int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
482 		   struct xdp_frame **frames, u32 flags);
483 
484 /* ethtool */
485 extern const struct ethtool_ops enetc_pf_ethtool_ops;
486 extern const struct ethtool_ops enetc4_pf_ethtool_ops;
487 extern const struct ethtool_ops enetc_vf_ethtool_ops;
488 void enetc_set_ethtool_ops(struct net_device *ndev);
489 void enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link);
490 void enetc_mm_commit_preemptible_tcs(struct enetc_ndev_priv *priv);
491 
492 /* control buffer descriptor ring (CBDR) */
493 int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
494 		     struct enetc_cbdr *cbdr);
495 void enetc_teardown_cbdr(struct enetc_cbdr *cbdr);
496 int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
497 			    char *mac_addr, int si_map);
498 int enetc_clear_mac_flt_entry(struct enetc_si *si, int index);
499 int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
500 		       int index);
501 void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes);
502 int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count);
503 int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count);
504 int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd);
505 
/* Allocate a DMA-coherent data buffer for a control BD command and
 * program the CBD with its 64-byte-aligned DMA address.
 *
 * ENETC_CBD_DATA_MEM_ALIGN extra bytes are allocated so the buffer can
 * be aligned up without losing capacity.  *dma receives the raw
 * (unaligned) DMA handle needed later by enetc_cbd_free_data_mem();
 * *data_align receives the aligned CPU pointer callers use for the
 * payload.
 *
 * Returns the raw CPU pointer (for the later free), or NULL on
 * allocation failure.
 */
static inline void *enetc_cbd_alloc_data_mem(struct enetc_si *si,
					     struct enetc_cbd *cbd,
					     int size, dma_addr_t *dma,
					     void **data_align)
{
	struct enetc_cbdr *ring = &si->cbd_ring;
	dma_addr_t dma_align;
	void *data;

	data = dma_alloc_coherent(ring->dma_dev,
				  size + ENETC_CBD_DATA_MEM_ALIGN,
				  dma, GFP_KERNEL);
	if (!data) {
		dev_err(ring->dma_dev, "CBD alloc data memory failed!\n");
		return NULL;
	}

	/* align both views (DMA and CPU) of the same buffer identically */
	dma_align = ALIGN(*dma, ENETC_CBD_DATA_MEM_ALIGN);
	*data_align = PTR_ALIGN(data, ENETC_CBD_DATA_MEM_ALIGN);

	/* hardware takes the aligned DMA address as two 32-bit halves */
	cbd->addr[0] = cpu_to_le32(lower_32_bits(dma_align));
	cbd->addr[1] = cpu_to_le32(upper_32_bits(dma_align));
	cbd->length = cpu_to_le16(size);

	return data;
}
532 
/* Release a buffer obtained from enetc_cbd_alloc_data_mem(); @data and
 * *dma must be the raw (unaligned) values returned by the allocator,
 * and @size the same size passed to it.
 */
static inline void enetc_cbd_free_data_mem(struct enetc_si *si, int size,
					   void *data, dma_addr_t *dma)
{
	dma_free_coherent(si->cbd_ring.dma_dev,
			  size + ENETC_CBD_DATA_MEM_ALIGN, data, *dma);
}
541 
542 void enetc_reset_ptcmsdur(struct enetc_hw *hw);
543 void enetc_set_ptcmsdur(struct enetc_hw *hw, u32 *queue_max_sdu);
544 
545 #ifdef CONFIG_FSL_ENETC_QOS
546 int enetc_qos_query_caps(struct net_device *ndev, void *type_data);
547 int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
548 void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed);
549 int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data);
550 int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data);
551 int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
552 			    void *cb_priv);
553 int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data);
554 int enetc_psfp_init(struct enetc_ndev_priv *priv);
555 int enetc_psfp_clean(struct enetc_ndev_priv *priv);
556 int enetc_set_psfp(struct net_device *ndev, bool en);
557 
/* Read the port PSFP capability registers and cache the hardware
 * limits in priv->psfp_cap.
 */
static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv)
{
	struct enetc_hw *hw = &priv->si->hw;
	u32 reg;

	/* Port stream identification capability */
	reg = enetc_port_rd(hw, ENETC_PSIDCAPR);
	priv->psfp_cap.max_streamid = reg & ENETC_PSIDCAPR_MSK;
	/* Port stream filter capability */
	reg = enetc_port_rd(hw, ENETC_PSFCAPR);
	priv->psfp_cap.max_psfp_filter = reg & ENETC_PSFCAPR_MSK;
	/* Port stream gate capability: gate instances in the low bits,
	 * gate control list size in the upper 16 bits
	 */
	reg = enetc_port_rd(hw, ENETC_PSGCAPR);
	priv->psfp_cap.max_psfp_gate = (reg & ENETC_PSGCAPR_SGIT_MSK);
	priv->psfp_cap.max_psfp_gatelist = (reg & ENETC_PSGCAPR_GCL_MSK) >> 16;
	/* Port flow meter capability */
	reg = enetc_port_rd(hw, ENETC_PFMCAPR);
	priv->psfp_cap.max_psfp_meter = reg & ENETC_PFMCAPR_MSK;
}
576 
/* Cache PSFP capabilities, initialize PSFP state, then enable stream
 * filtering on the port.  Returns 0 or the error from
 * enetc_psfp_init().
 */
static inline int enetc_psfp_enable(struct enetc_ndev_priv *priv)
{
	struct enetc_hw *hw = &priv->si->hw;
	int err;

	enetc_get_max_cap(priv);

	err = enetc_psfp_init(priv);
	if (err)
		return err;

	/* read-modify-write: set the PSFP enable and VLAN check bits,
	 * preserving whatever else is in the mode register
	 */
	enetc_wr(hw, ENETC_PPSFPMR, enetc_rd(hw, ENETC_PPSFPMR) |
		 ENETC_PPSFPMR_PSFPEN | ENETC_PPSFPMR_VS |
		 ENETC_PPSFPMR_PVC | ENETC_PPSFPMR_PVZC);

	return 0;
}
594 
/* Tear down PSFP state, clear the PSFP enable and VLAN check bits in
 * the port mode register, and forget the cached capabilities.
 * Returns 0 or the error from enetc_psfp_clean().
 */
static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
{
	struct enetc_hw *hw = &priv->si->hw;
	int err;

	err = enetc_psfp_clean(priv);
	if (err)
		return err;

	/* read-modify-write: mirror image of enetc_psfp_enable() */
	enetc_wr(hw, ENETC_PPSFPMR, enetc_rd(hw, ENETC_PPSFPMR) &
		 ~ENETC_PPSFPMR_PSFPEN & ~ENETC_PPSFPMR_VS &
		 ~ENETC_PPSFPMR_PVC & ~ENETC_PPSFPMR_PVZC);

	memset(&priv->psfp_cap, 0, sizeof(struct psfp_cap));

	return 0;
}
612 
613 #else
614 #define enetc_qos_query_caps(ndev, type_data) -EOPNOTSUPP
615 #define enetc_setup_tc_taprio(ndev, type_data) -EOPNOTSUPP
616 #define enetc_sched_speed_set(priv, speed) (void)0
617 #define enetc_setup_tc_cbs(ndev, type_data) -EOPNOTSUPP
618 #define enetc_setup_tc_txtime(ndev, type_data) -EOPNOTSUPP
619 #define enetc_setup_tc_psfp(ndev, type_data) -EOPNOTSUPP
620 #define enetc_setup_tc_block_cb NULL
621 
622 #define enetc_get_max_cap(p)		\
623 	memset(&((p)->psfp_cap), 0, sizeof(struct psfp_cap))
624 
/* CONFIG_FSL_ENETC_QOS disabled: PSFP setup is a successful no-op */
static inline int enetc_psfp_enable(struct enetc_ndev_priv *priv)
{
	return 0;
}
629 
/* CONFIG_FSL_ENETC_QOS disabled: PSFP teardown is a successful no-op */
static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
{
	return 0;
}
634 
/* CONFIG_FSL_ENETC_QOS disabled: toggling PSFP always "succeeds" */
static inline int enetc_set_psfp(struct net_device *ndev, bool en)
{
	return 0;
}
639 #endif
640