/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_NET_QUEUES_H
#define _LINUX_NET_QUEUES_H

#include <linux/netdevice.h>

/**
 * struct netdev_config - queue-related configuration for a netdev
 * @hds_thresh:		header-data split (HDS) threshold value.
 * @hds_config:		HDS setting requested by userspace.
 */
struct netdev_config {
	u32	hds_thresh;
	u8	hds_config;
};

/* See the netdev.yaml spec for definition of each statistic */
struct netdev_queue_stats_rx {
	u64 bytes;
	u64 packets;
	u64 alloc_fail;

	u64 hw_drops;
	u64 hw_drop_overruns;

	u64 csum_unnecessary;
	u64 csum_none;
	u64 csum_bad;

	u64 hw_gro_packets;
	u64 hw_gro_bytes;
	u64 hw_gro_wire_packets;
	u64 hw_gro_wire_bytes;

	u64 hw_drop_ratelimits;
};

struct netdev_queue_stats_tx {
	u64 bytes;
	u64 packets;

	u64 hw_drops;
	u64 hw_drop_errors;

	u64 csum_none;
	u64 needs_csum;

	u64 hw_gso_packets;
	u64 hw_gso_bytes;
	u64 hw_gso_wire_packets;
	u64 hw_gso_wire_bytes;

	u64 hw_drop_ratelimits;

	u64 stop;
	u64 wake;
};

/**
 * struct netdev_stat_ops - netdev ops for fine-grained stats
 * @get_queue_stats_rx:	get stats for a given Rx queue
 * @get_queue_stats_tx:	get stats for a given Tx queue
 * @get_base_stats:	get base stats (not belonging to any live instance)
 *
 * Query stats for a given object. The values of the statistics are undefined
 * on entry (specifically they are *not* zero-initialized). Drivers should
 * assign values only to the statistics they collect. Statistics which are not
 * collected must be left undefined.
 *
 * Queue objects are not necessarily persistent, and only currently active
 * queues are queried by the per-queue callbacks. This means that per-queue
 * statistics will not generally add up to the total number of events for
 * the device. The @get_base_stats callback allows filling in the delta
 * between events for currently live queues and overall device history.
 * @get_base_stats can also be used to report any miscellaneous packets
 * transferred outside of the main set of queues used by the networking stack.
 * When the statistics for the entire device are queried, first @get_base_stats
 * is issued to collect the delta, and then a series of per-queue callbacks.
 * Only statistics which are set in @get_base_stats will be reported
 * at the device level, meaning that unlike in queue callbacks, setting
 * a statistic to zero in @get_base_stats is a legitimate thing to do.
 * This is because @get_base_stats has a second function of designating which
 * statistics are in fact correct for the entire device (e.g. when history
 * for some of the events is not maintained, and a reliable "total" cannot
 * be provided).
 *
 * Device drivers can assume that when collecting total device stats,
 * the @get_base_stats and subsequent per-queue calls are performed
 * "atomically" (without releasing the rtnl_lock).
 *
 * Device drivers are encouraged to reset the per-queue statistics when
 * the number of queues changes. This is because the primary use case for
 * per-queue statistics is currently to detect traffic imbalance.
 */
struct netdev_stat_ops {
	void (*get_queue_stats_rx)(struct net_device *dev, int idx,
				   struct netdev_queue_stats_rx *stats);
	void (*get_queue_stats_tx)(struct net_device *dev, int idx,
				   struct netdev_queue_stats_tx *stats);
	void (*get_base_stats)(struct net_device *dev,
			       struct netdev_queue_stats_rx *rx,
			       struct netdev_queue_stats_tx *tx);
};
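
/* Example (illustrative sketch, not part of this API): one way a driver
 * might wire up these ops. "struct my_priv", "my_rx_ring" and all my_*
 * names are hypothetical.
 *
 *	static void my_get_queue_stats_rx(struct net_device *dev, int idx,
 *					  struct netdev_queue_stats_rx *stats)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *		struct my_rx_ring *ring = &priv->rx_rings[idx];
 *
 *		// Assign only what this driver collects; the rest must
 *		// stay untouched (fields are not zero-initialized).
 *		stats->packets = ring->packets;
 *		stats->bytes = ring->bytes;
 *		stats->alloc_fail = ring->alloc_fail;
 *	}
 *
 *	static void my_get_base_stats(struct net_device *dev,
 *				      struct netdev_queue_stats_rx *rx,
 *				      struct netdev_queue_stats_tx *tx)
 *	{
 *		// Zero is meaningful here: it marks the statistic as
 *		// valid device-wide even when no history is kept.
 *		rx->packets = 0;
 *		rx->bytes = 0;
 *		tx->packets = 0;
 *		tx->bytes = 0;
 *	}
 *
 *	static const struct netdev_stat_ops my_stat_ops = {
 *		.get_queue_stats_rx	= my_get_queue_stats_rx,
 *		.get_base_stats		= my_get_base_stats,
 *	};
 */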

/**
 * struct netdev_queue_mgmt_ops - netdev ops for queue management
 *
 * @ndo_queue_mem_size: Size of the struct that describes a queue's memory.
 *
 * @ndo_queue_mem_alloc: Allocate memory for an RX queue at the specified index.
 *			 The new memory is written at the specified address.
 *
 * @ndo_queue_mem_free:	Free memory from an RX queue.
 *
 * @ndo_queue_start:	Start an RX queue with the specified memory and at the
 *			specified index.
 *
 * @ndo_queue_stop:	Stop the RX queue at the specified index. The stopped
 *			queue's memory is written at the specified address.
 */
struct netdev_queue_mgmt_ops {
	size_t			ndo_queue_mem_size;
	int			(*ndo_queue_mem_alloc)(struct net_device *dev,
						       void *per_queue_mem,
						       int idx);
	void			(*ndo_queue_mem_free)(struct net_device *dev,
						      void *per_queue_mem);
	int			(*ndo_queue_start)(struct net_device *dev,
						   void *per_queue_mem,
						   int idx);
	int			(*ndo_queue_stop)(struct net_device *dev,
						  void *per_queue_mem,
						  int idx);
};
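
/* Example (illustrative sketch): roughly the sequence the core performs
 * with these ops to restart one RX queue. "idx", "new_mem" and "old_mem"
 * are local placeholders, and all error handling is elided.
 *
 *	const struct netdev_queue_mgmt_ops *ops = dev->queue_mgmt_ops;
 *	void *new_mem = kzalloc(ops->ndo_queue_mem_size, GFP_KERNEL);
 *	void *old_mem = kzalloc(ops->ndo_queue_mem_size, GFP_KERNEL);
 *
 *	if (!ops->ndo_queue_mem_alloc(dev, new_mem, idx)) {
 *		ops->ndo_queue_stop(dev, old_mem, idx);  // saves old state
 *		ops->ndo_queue_start(dev, new_mem, idx); // new memory live
 *		ops->ndo_queue_mem_free(dev, old_mem);   // release old mem
 *	}
 *	kfree(old_mem);
 *	kfree(new_mem);
 */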

/**
 * DOC: Lockless queue stopping / waking helpers.
 *
 * The netif_txq_maybe_stop() and __netif_txq_completed_wake()
 * macros are designed to safely implement stopping
 * and waking netdev queues without full lock protection.
 *
 * We assume that there can be no concurrent stop attempts and no concurrent
 * wake attempts. The try-stop should happen from the xmit handler,
 * while waking should be triggered from the NAPI poll context.
 * The two may run concurrently (single producer, single consumer).
 *
 * The try-stop side is expected to run from the xmit handler and therefore
 * it does not reschedule Tx (netif_tx_start_queue() instead of
 * netif_tx_wake_queue()). Uses of the ``stop`` macros outside of the xmit
 * handler may lead to the xmit queue being enabled but not run.
 * The waking side does not have similar context restrictions.
 *
 * The macros guarantee that rings will not remain stopped if there's
 * space available, but they do *not* prevent false wake-ups when
 * the ring is full! Drivers should check for ring full at the start
 * of the xmit handler.
 *
 * All descriptor ring indexes (and other relevant shared state) must
 * be updated before invoking the macros.
 */

#define netif_txq_try_stop(txq, get_desc, start_thrs)			\
	({								\
		int _res;						\
									\
		netif_tx_stop_queue(txq);				\
		/* Producer index and stop bit must be visible		\
		 * to consumer before we recheck.			\
		 * Pairs with a barrier in __netif_txq_completed_wake(). \
		 */							\
		smp_mb__after_atomic();					\
									\
		/* We need to check again in case another		\
		 * CPU has just made room available.			\
		 */							\
		_res = 0;						\
		if (unlikely(get_desc >= start_thrs)) {			\
			netif_tx_start_queue(txq);			\
			_res = -1;					\
		}							\
		_res;							\
	})

/**
 * netif_txq_maybe_stop() - locklessly stop a Tx queue, if needed
 * @txq:	struct netdev_queue to stop/start
 * @get_desc:	get current number of free descriptors (see requirements below!)
 * @stop_thrs:	minimal number of available descriptors for queue to be left
 *		enabled
 * @start_thrs:	minimal number of descriptors to re-enable the queue, can be
 *		equal to @stop_thrs or higher to avoid frequent waking
 *
 * All arguments may be evaluated multiple times; beware of side effects.
 * @get_desc must be a formula or a function call; it must always
 * return up-to-date information when evaluated!
 * Expected to be used from ndo_start_xmit; see the DOC block above.
 *
 * Returns:
 *	 0 if the queue was stopped
 *	 1 if the queue was left enabled
 *	-1 if the queue was re-enabled (raced with waking)
 */
#define netif_txq_maybe_stop(txq, get_desc, stop_thrs, start_thrs)	\
	({								\
		int _res;						\
									\
		_res = 1;						\
		if (unlikely(get_desc < stop_thrs))			\
			_res = netif_txq_try_stop(txq, get_desc, start_thrs); \
		_res;							\
	})
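
/* Example (illustrative sketch): typical use at the end of an
 * ndo_start_xmit implementation, after all descriptors for the skb have
 * been posted to the ring. my_desc_avail() and tx_wake_thresh are
 * hypothetical driver helpers, not part of this API.
 *
 *	// Stop unless another maximally fragmented skb still fits.
 *	netif_txq_maybe_stop(txq, my_desc_avail(ring),
 *			     MAX_SKB_FRAGS + 1, priv->tx_wake_thresh);
 */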

/* Variant of netdev_tx_completed_queue() which guarantees smp_mb() if
 * @bytes != 0, regardless of kernel config.
 */
static inline void
netdev_txq_completed_mb(struct netdev_queue *dev_queue,
			unsigned int pkts, unsigned int bytes)
{
	if (IS_ENABLED(CONFIG_BQL))
		netdev_tx_completed_queue(dev_queue, pkts, bytes);
	else if (bytes)
		smp_mb();
}

/**
 * __netif_txq_completed_wake() - locklessly wake a Tx queue, if needed
 * @txq:	struct netdev_queue to stop/start
 * @pkts:	number of packets completed
 * @bytes:	number of bytes completed
 * @get_desc:	get current number of free descriptors (see requirements below!)
 * @start_thrs:	minimal number of descriptors to re-enable the queue
 * @down_cond:	down condition, predicate indicating that the queue should
 *		not be woken up even if descriptors are available
 *
 * All arguments may be evaluated multiple times.
 * @get_desc must be a formula or a function call; it must always
 * return up-to-date information when evaluated!
 * Reports completed pkts/bytes to BQL.
 *
 * Returns:
 *	 0 if the queue was woken up
 *	 1 if the queue was already enabled (or disabled but @down_cond is true)
 *	-1 if the queue was left unchanged (@start_thrs not reached)
 */
#define __netif_txq_completed_wake(txq, pkts, bytes,			\
				   get_desc, start_thrs, down_cond)	\
	({								\
		int _res;						\
									\
		/* Report to BQL and piggyback on its barrier.		\
		 * Barrier makes sure that anybody stopping the queue	\
		 * after this point sees the new consumer index.	\
		 * Pairs with barrier in netif_txq_try_stop().		\
		 */							\
		netdev_txq_completed_mb(txq, pkts, bytes);		\
									\
		_res = -1;						\
		if (pkts && likely(get_desc >= start_thrs)) {		\
			_res = 1;					\
			if (unlikely(netif_tx_queue_stopped(txq)) &&	\
			    !(down_cond)) {				\
				netif_tx_wake_queue(txq);		\
				_res = 0;				\
			}						\
		}							\
		_res;							\
	})

#define netif_txq_completed_wake(txq, pkts, bytes, get_desc, start_thrs) \
	__netif_txq_completed_wake(txq, pkts, bytes, get_desc, start_thrs, false)
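
/* Example (illustrative sketch): typical use from the Tx completion
 * path in NAPI poll, after the ring's consumer index has been updated.
 * done_pkts/done_bytes count the completions just processed;
 * my_desc_avail() and tx_wake_thresh are hypothetical.
 *
 *	netif_txq_completed_wake(txq, done_pkts, done_bytes,
 *				 my_desc_avail(ring), priv->tx_wake_thresh);
 */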

/* subqueue variants follow */

#define netif_subqueue_try_stop(dev, idx, get_desc, start_thrs)		\
	({								\
		struct netdev_queue *txq;				\
									\
		txq = netdev_get_tx_queue(dev, idx);			\
		netif_txq_try_stop(txq, get_desc, start_thrs);		\
	})

#define netif_subqueue_maybe_stop(dev, idx, get_desc, stop_thrs, start_thrs) \
	({								\
		struct netdev_queue *txq;				\
									\
		txq = netdev_get_tx_queue(dev, idx);			\
		netif_txq_maybe_stop(txq, get_desc, stop_thrs, start_thrs); \
	})

#define netif_subqueue_completed_wake(dev, idx, pkts, bytes,		\
				      get_desc, start_thrs)		\
	({								\
		struct netdev_queue *txq;				\
									\
		txq = netdev_get_tx_queue(dev, idx);			\
		netif_txq_completed_wake(txq, pkts, bytes,		\
					 get_desc, start_thrs);		\
	})
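
/* Example (illustrative sketch): the subqueue variants take the netdev
 * and a queue index instead of a struct netdev_queue pointer, which
 * suits drivers that track rings by number (hypothetical names again):
 *
 *	netif_subqueue_maybe_stop(dev, ring->index, my_desc_avail(ring),
 *				  MAX_SKB_FRAGS + 1, priv->tx_wake_thresh);
 */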

#endif