/* SPDX-License-Identifier: GPL-2.0
 *
 * Network memory
 *
 * Author: Mina Almasry <[email protected]>
 */

#ifndef _NET_NETMEM_H
#define _NET_NETMEM_H

#include <linux/mm.h>
#include <net/net_debug.h>

/* net_iov */

DECLARE_STATIC_KEY_FALSE(page_pool_mem_providers);

/* We overload the LSB of the struct page pointer to indicate whether it's
 * a page or net_iov.
 */
#define NET_IOV 0x01UL
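
/* Illustrative sketch, not part of the API: struct page and struct net_iov
 * are both at least word-aligned, so the LSB of a valid pointer is always
 * zero and is free to carry the type tag. Encoding and testing look like:
 *
 *	netmem_ref ref = (__force netmem_ref)((unsigned long)niov | NET_IOV);
 *
 *	if ((__force unsigned long)ref & NET_IOV)
 *		;	// ref is backed by a net_iov, not a page
 *
 * net_iov_to_netmem() and netmem_is_net_iov() below wrap exactly this
 * pattern.
 */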

struct net_iov {
	unsigned long __unused_padding;
	unsigned long pp_magic;
	struct page_pool *pp;
	struct dmabuf_genpool_chunk_owner *owner;
	unsigned long dma_addr;
	atomic_long_t pp_ref_count;
};

/* These fields in struct page are used by the page_pool and net stack:
 *
 *	struct {
 *		unsigned long pp_magic;
 *		struct page_pool *pp;
 *		unsigned long _pp_mapping_pad;
 *		unsigned long dma_addr;
 *		atomic_long_t pp_ref_count;
 *	};
 *
 * We mirror the page_pool fields here so the page_pool can access these fields
 * without worrying whether the underlying fields belong to a page or net_iov.
 *
 * The non-net stack fields of struct page are private to the mm stack and must
 * never be mirrored to net_iov.
 */
#define NET_IOV_ASSERT_OFFSET(pg, iov)             \
	static_assert(offsetof(struct page, pg) == \
		      offsetof(struct net_iov, iov))
NET_IOV_ASSERT_OFFSET(pp_magic, pp_magic);
NET_IOV_ASSERT_OFFSET(pp, pp);
NET_IOV_ASSERT_OFFSET(dma_addr, dma_addr);
NET_IOV_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
#undef NET_IOV_ASSERT_OFFSET
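
/* The payoff of the mirrored layout: an accessor can clear the type tag and
 * dereference the shared field without branching on the backing type. A
 * minimal sketch of the pattern used by the accessors below:
 *
 *	struct net_iov *iov = (struct net_iov *)
 *			      ((__force unsigned long)netmem & ~NET_IOV);
 *	struct page_pool *pool = iov->pp;	// valid for either backing
 */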

/* netmem */

/**
 * typedef netmem_ref - a nonexistent type marking a reference to generic
 * network memory.
 *
 * A netmem_ref is currently a reference to either a struct page or a
 * struct net_iov; the LSB of the value distinguishes the two. This
 * abstraction is introduced so support for further memory types can be
 * added.
 *
 * Use the supplied helpers to obtain the underlying memory pointer and fields.
 */
typedef unsigned long __bitwise netmem_ref;
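
/* Hedged usage sketch (page, hdr and hdr_len are hypothetical driver
 * locals): convert, then rely on the checked accessors, which degrade
 * gracefully for non-page-backed memory.
 *
 *	netmem_ref netmem = page_to_netmem(page);
 *	void *va = netmem_address(netmem);	// NULL if not host-addressable
 *
 *	if (va)
 *		memcpy(va, hdr, hdr_len);
 */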

static inline bool netmem_is_net_iov(const netmem_ref netmem)
{
	return (__force unsigned long)netmem & NET_IOV;
}

/**
 * __netmem_to_page - unsafely get pointer to the &page backing @netmem
 * @netmem: netmem reference to convert
 *
 * Unsafe version of netmem_to_page(). When @netmem is always page-backed,
 * e.g. when it's a header buffer, performs faster and generates smaller
 * object code (no check for the LSB, no WARN). When @netmem points to IOV,
 * provokes undefined behaviour.
 *
 * Return: pointer to the &page (garbage if @netmem is not page-backed).
 */
static inline struct page *__netmem_to_page(netmem_ref netmem)
{
	return (__force struct page *)netmem;
}
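
/* Sketch of the intended fast path, assuming a hypothetical RX ring whose
 * header buffers are always allocated from a page-backed pool:
 *
 *	struct page *page = __netmem_to_page(ring->hdr_netmem);
 *
 * Prefer the checked netmem_to_page() below whenever the backing type is
 * not known by construction.
 */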

/* This conversion fails (returns NULL) if the netmem_ref is not struct page
 * backed.
 */
static inline struct page *netmem_to_page(netmem_ref netmem)
{
	if (WARN_ON_ONCE(netmem_is_net_iov(netmem)))
		return NULL;

	return __netmem_to_page(netmem);
}

static inline struct net_iov *netmem_to_net_iov(netmem_ref netmem)
{
	if (netmem_is_net_iov(netmem))
		return (struct net_iov *)((__force unsigned long)netmem &
					  ~NET_IOV);

	DEBUG_NET_WARN_ON_ONCE(true);
	return NULL;
}

static inline netmem_ref net_iov_to_netmem(struct net_iov *niov)
{
	return (__force netmem_ref)((unsigned long)niov | NET_IOV);
}

static inline netmem_ref page_to_netmem(struct page *page)
{
	return (__force netmem_ref)page;
}

/**
 * virt_to_netmem - convert virtual memory pointer to a netmem reference
 * @data: host memory pointer to convert
 *
 * Return: netmem reference to the &page backing this virtual address.
 */
static inline netmem_ref virt_to_netmem(const void *data)
{
	return page_to_netmem(virt_to_page(data));
}
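
/* Round-trip sketch, assuming @netmem is page-backed by directly mapped
 * memory (virt_to_page() does not handle vmalloc addresses):
 *
 *	void *va = netmem_address(netmem);
 *
 *	if (va)
 *		WARN_ON(virt_to_netmem(va) != netmem);
 */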

static inline int netmem_ref_count(netmem_ref netmem)
{
	/* The non-pp refcount of a net_iov is always 1. On net_iov, we only
	 * support pp refcounting, which uses the pp_ref_count field.
	 */
	if (netmem_is_net_iov(netmem))
		return 1;

	return page_ref_count(netmem_to_page(netmem));
}

static inline unsigned long netmem_pfn_trace(netmem_ref netmem)
{
	if (netmem_is_net_iov(netmem))
		return 0;

	return page_to_pfn(netmem_to_page(netmem));
}

static inline struct net_iov *__netmem_clear_lsb(netmem_ref netmem)
{
	return (struct net_iov *)((__force unsigned long)netmem & ~NET_IOV);
}

/**
 * __netmem_get_pp - unsafely get pointer to the &page_pool backing @netmem
 * @netmem: netmem reference to get the pointer from
 *
 * Unsafe version of netmem_get_pp(). When @netmem is always page-backed,
 * e.g. when it's a header buffer, performs faster and generates smaller
 * object code (avoids clearing the LSB). When @netmem points to IOV,
 * provokes invalid memory access.
 *
 * Return: pointer to the &page_pool (garbage if @netmem is not page-backed).
 */
static inline struct page_pool *__netmem_get_pp(netmem_ref netmem)
{
	return __netmem_to_page(netmem)->pp;
}

static inline struct page_pool *netmem_get_pp(netmem_ref netmem)
{
	return __netmem_clear_lsb(netmem)->pp;
}

static inline atomic_long_t *netmem_get_pp_ref_count_ref(netmem_ref netmem)
{
	return &__netmem_clear_lsb(netmem)->pp_ref_count;
}
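
/* Sketch of how a caller holding a hypothetical nrefs extra references
 * might account them on the shared pp_ref_count (illustrative only; real
 * code should go through the page_pool fragment API):
 *
 *	atomic_long_add(nrefs, netmem_get_pp_ref_count_ref(netmem));
 */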

static inline bool netmem_is_pref_nid(netmem_ref netmem, int pref_nid)
{
	/* NUMA node preference only makes sense if we're allocating
	 * system memory. Memory providers (which give us net_iovs)
	 * choose for us.
	 */
	if (netmem_is_net_iov(netmem))
		return true;

	return page_to_nid(netmem_to_page(netmem)) == pref_nid;
}

static inline netmem_ref netmem_compound_head(netmem_ref netmem)
{
	/* niov are never compounded */
	if (netmem_is_net_iov(netmem))
		return netmem;

	return page_to_netmem(compound_head(netmem_to_page(netmem)));
}

/**
 * __netmem_address - unsafely get pointer to the memory backing @netmem
 * @netmem: netmem reference to get the pointer for
 *
 * Unsafe version of netmem_address(). When @netmem is always page-backed,
 * e.g. when it's a header buffer, performs faster and generates smaller
 * object code (no check for the LSB). When @netmem points to IOV, provokes
 * undefined behaviour.
 *
 * Return: pointer to the memory (garbage if @netmem is not page-backed).
 */
static inline void *__netmem_address(netmem_ref netmem)
{
	return page_address(__netmem_to_page(netmem));
}

static inline void *netmem_address(netmem_ref netmem)
{
	if (netmem_is_net_iov(netmem))
		return NULL;

	return __netmem_address(netmem);
}

/**
 * netmem_is_pfmemalloc - check if @netmem was allocated under memory pressure
 * @netmem: netmem reference to check
 *
 * Return: true if @netmem is page-backed and the page was allocated under
 * memory pressure, false otherwise.
 */
static inline bool netmem_is_pfmemalloc(netmem_ref netmem)
{
	if (netmem_is_net_iov(netmem))
		return false;

	return page_is_pfmemalloc(netmem_to_page(netmem));
}

static inline unsigned long netmem_get_dma_addr(netmem_ref netmem)
{
	return __netmem_clear_lsb(netmem)->dma_addr;
}

#endif /* _NET_NETMEM_H */