1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (c) 2019 Facebook
4  * Copyright 2020 Google LLC.
5  */
6 
7 #ifndef _BPF_LOCAL_STORAGE_H
8 #define _BPF_LOCAL_STORAGE_H
9 
10 #include <linux/bpf.h>
11 #include <linux/filter.h>
12 #include <linux/rculist.h>
13 #include <linux/list.h>
14 #include <linux/hash.h>
15 #include <linux/types.h>
16 #include <linux/bpf_mem_alloc.h>
17 #include <uapi/linux/btf.h>
18 
/* Number of per-storage cache slots; indexes the "cache" array in
 * struct bpf_local_storage below.
 */
#define BPF_LOCAL_STORAGE_CACHE_SIZE	16

/* True if the caller holds any RCU read-side lock flavor (normal, trace,
 * or bh) that protects local-storage lookups.
 */
#define bpf_rcu_lock_held()                                                    \
	(rcu_read_lock_held() || rcu_read_lock_trace_held() ||                 \
	 rcu_read_lock_bh_held())
/* One hash bucket of a bpf_local_storage_map.  "lock" serializes
 * link/unlink of elems on "list" (see the buckets comment in
 * struct bpf_local_storage_map).
 */
struct bpf_local_storage_map_bucket {
	struct hlist_head list;
	raw_spinlock_t lock;
};
28 
/* The map is not the primary owner of a bpf_local_storage_elem.
 * Instead, the container object (eg. sk->sk_bpf_storage) is.
 *
 * The map (bpf_local_storage_map) is for two purposes
 * 1. Define the size of the "local storage".  It is
 *    the map's value_size.
 *
 * 2. Maintain a list to keep track of all elems such
 *    that they can be cleaned up during the map destruction.
 *
 * When a bpf local storage is being looked up for a
 * particular object,  the "bpf_map" pointer is actually used
 * as the "key" to search in the list of elem in
 * the respective bpf_local_storage owned by the object.
 *
 * e.g. sk->sk_bpf_storage is the mini-map with the "bpf_map" pointer
 * as the searching key.
 */
struct bpf_local_storage_map {
	struct bpf_map map;
	/* Lookup elem does not require accessing the map.
	 *
	 * Updating/Deleting requires a bucket lock to
	 * link/unlink the elem from the map.  Having
	 * multiple buckets to improve contention.
	 */
	struct bpf_local_storage_map_bucket *buckets;
	u32 bucket_log;		/* log2 of the number of buckets (by name; confirm in .c) */
	u16 elem_size;		/* size of one elem incl. value (by name; confirm in .c) */
	u16 cache_idx;		/* slot this map uses in bpf_local_storage->cache[] */
	struct bpf_mem_alloc selem_ma;		/* elem allocator, used when bpf_ma */
	struct bpf_mem_alloc storage_ma;	/* storage allocator, used when bpf_ma */
	bool bpf_ma;		/* allocate via bpf_mem_alloc instead of kmalloc */
};
63 
/* One map's value as stored inside an object's bpf_local_storage. */
struct bpf_local_storage_data {
	/* smap is used as the searching key when looking up
	 * from the object's bpf_local_storage.
	 *
	 * Put it in the same cacheline as the data to minimize
	 * the number of cachelines accessed during the cache hit case.
	 */
	struct bpf_local_storage_map __rcu *smap;
	/* Flexible array member holding the map value, 8-byte aligned. */
	u8 data[] __aligned(8);
};
74 
/* Linked to bpf_local_storage and bpf_local_storage_map */
struct bpf_local_storage_elem {
	struct hlist_node map_node;	/* Linked to bpf_local_storage_map */
	struct hlist_node snode;	/* Linked to bpf_local_storage */
	/* Back-pointer to the storage this elem is linked into (via snode). */
	struct bpf_local_storage __rcu *local_storage;
	union {
		struct rcu_head rcu;
		struct hlist_node free_node;	/* used to postpone
						 * bpf_selem_free
						 * after raw_spin_unlock
						 */
	};
	/* 8 bytes hole */
	/* The data is stored in another cacheline to minimize
	 * the number of cachelines access during a cache hit.
	 */
	struct bpf_local_storage_data sdata ____cacheline_aligned;
};
93 
/* Per-object storage: the list of this object's elems across maps,
 * plus a small cache of recently looked-up sdata (indexed by the
 * map's cache_idx).
 */
struct bpf_local_storage {
	struct bpf_local_storage_data __rcu *cache[BPF_LOCAL_STORAGE_CACHE_SIZE];
	struct bpf_local_storage_map __rcu *smap;
	struct hlist_head list; /* List of bpf_local_storage_elem */
	void *owner;		/* The object that owns the above "list" of
				 * bpf_local_storage_elem.
				 */
	struct rcu_head rcu;
	raw_spinlock_t lock;	/* Protect adding/removing from the "list" */
};
104 
/* U16_MAX is much more than enough for sk local storage
 * considering a tcp_sock is ~2k.
 *
 * Cap the value size so that elem_size (a u16 that also covers the
 * elem header) cannot overflow and the allocation stays within
 * KMALLOC_MAX_SIZE even after MAX_BPF_STACK headroom.
 */
#define BPF_LOCAL_STORAGE_MAX_VALUE_SIZE				       \
	min_t(u32,                                                             \
	      (KMALLOC_MAX_SIZE - MAX_BPF_STACK -                              \
	       sizeof(struct bpf_local_storage_elem)),                         \
	      (U16_MAX - sizeof(struct bpf_local_storage_elem)))

/* Convert between an elem and its embedded sdata member. */
#define SELEM(_SDATA)                                                          \
	container_of((_SDATA), struct bpf_local_storage_elem, sdata)
#define SDATA(_SELEM) (&(_SELEM)->sdata)

/* Note: BPF_LOCAL_STORAGE_CACHE_SIZE is defined once near the top of
 * this header; a redundant (identical) redefinition that used to live
 * here has been dropped.
 */
119 
/* Per map-type bookkeeping used to hand out cache_idx values. */
struct bpf_local_storage_cache {
	spinlock_t idx_lock;	/* presumably protects idx_usage_counts; confirm in .c */
	u64 idx_usage_counts[BPF_LOCAL_STORAGE_CACHE_SIZE];
};

/* Define a statically-initialized cache instance for a storage flavor
 * (e.g. sk, task, inode) in its own .c file.
 */
#define DEFINE_BPF_STORAGE_CACHE(name)				\
static struct bpf_local_storage_cache name = {			\
	.idx_lock = __SPIN_LOCK_UNLOCKED(name.idx_lock),	\
}
129 
/* Helper functions for bpf_local_storage */

/* Validate map attrs at map-create time. */
int bpf_local_storage_map_alloc_check(union bpf_attr *attr);

/* Allocate a local-storage map; @bpf_ma selects bpf_mem_alloc backing. */
struct bpf_map *
bpf_local_storage_map_alloc(union bpf_attr *attr,
			    struct bpf_local_storage_cache *cache,
			    bool bpf_ma);

/* Promote @selem into @local_storage's cache slot for @smap
 * (slow-path hit in bpf_local_storage_lookup).
 */
void __bpf_local_storage_insert_cache(struct bpf_local_storage *local_storage,
				      struct bpf_local_storage_map *smap,
				      struct bpf_local_storage_elem *selem);
/* If cacheit_lockit is false, this lookup function is lockless */

/* Find the sdata belonging to @smap inside @local_storage, or NULL.
 *
 * Fast path reads the per-map cache slot; on a miss the RCU-protected
 * elem list is walked using the map pointer as the search key.  When
 * @cacheit_lockit is true, a slow-path hit is inserted into the cache
 * slot via __bpf_local_storage_insert_cache.  Caller must satisfy
 * bpf_rcu_lock_held() or rcu_read_lock_trace_held().
 */
static inline struct bpf_local_storage_data *
bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
			 struct bpf_local_storage_map *smap,
			 bool cacheit_lockit)
{
	struct bpf_local_storage_data *sdata;
	struct bpf_local_storage_elem *selem;

	/* Fast path (cache hit) */
	sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
				      bpf_rcu_lock_held());
	/* A stale cache entry may point at another map's sdata, so
	 * re-check the smap back-pointer before trusting the hit.
	 */
	if (sdata && rcu_access_pointer(sdata->smap) == smap)
		return sdata;

	/* Slow path (cache miss) */
	hlist_for_each_entry_rcu(selem, &local_storage->list, snode,
				  rcu_read_lock_trace_held())
		if (rcu_access_pointer(SDATA(selem)->smap) == smap)
			break;

	/* selem is NULL here iff the list was exhausted without a match */
	if (!selem)
		return NULL;
	if (cacheit_lockit)
		__bpf_local_storage_insert_cache(local_storage, smap, selem);
	return SDATA(selem);
}
168 
/* Tear down an object's storage (owner is going away). */
void bpf_local_storage_destroy(struct bpf_local_storage *local_storage);

/* Free a local-storage map and all elems still linked to it. */
void bpf_local_storage_map_free(struct bpf_map *map,
				struct bpf_local_storage_cache *cache,
				int __percpu *busy_counter);

/* BTF key/value type check for local-storage maps. */
int bpf_local_storage_map_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type);

/* Link @selem into @local_storage->list; caller holds the storage lock. */
void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
				   struct bpf_local_storage_elem *selem);

/* Unlink @selem from both its storage and its map; @reuse_now selects
 * immediate free vs RCU-deferred free (see bpf_selem_free).
 */
void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now);

/* Link @selem into @smap's bucket list. */
void bpf_selem_link_map(struct bpf_local_storage_map *smap,
			struct bpf_local_storage_elem *selem);

/* Allocate an elem for @owner, optionally copying @value in. */
struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value,
		bool charge_mem, bool swap_uptrs, gfp_t gfp_flags);

/* Free an elem; @reuse_now skips the RCU grace-period deferral. */
void bpf_selem_free(struct bpf_local_storage_elem *selem,
		    struct bpf_local_storage_map *smap,
		    bool reuse_now);

/* Allocate the bpf_local_storage for @owner, seeded with @first_selem. */
int
bpf_local_storage_alloc(void *owner,
			struct bpf_local_storage_map *smap,
			struct bpf_local_storage_elem *first_selem,
			gfp_t gfp_flags);

/* Create or update @owner's value for @smap per @map_flags. */
struct bpf_local_storage_data *
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
			 void *value, u64 map_flags, bool swap_uptrs, gfp_t gfp_flags);

/* Report memory charged to a local-storage map (map_mem_usage op). */
u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map);
207 
208 #endif /* _BPF_LOCAL_STORAGE_H */
209