/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// The resulting .o needs to load on Android T+
#define BPFLOADER_MIN_VER BPFLOADER_MAINLINE_T_VERSION

#include "bpf_net_helpers.h"
#include "netd.h"

// This is defined for cgroup bpf filter only.
static const int DROP = 0;
static const int PASS = 1;
static const int DROP_UNLESS_DNS = 2;  // internal to our program

// Used for 'bool enable_tracing'
static const bool TRACE_ON = true;
static const bool TRACE_OFF = false;

// offsetof(struct iphdr, ihl) -- but that's a bitfield
#define IPPROTO_IHL_OFF 0

// This is offsetof(struct tcphdr, "32 bit tcp flag field")
// The tcp flags are after be16 source, dest & be32 seq, ack_seq, hence 12 bytes in.
//
// Note that TCP_FLAG_{ACK,PSH,RST,SYN,FIN} are htonl(0x00{10,08,04,02,01}0000)
// see include/uapi/linux/tcp.h
#define TCP_FLAG32_OFF 12

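// TCP_FLAG8_OFF is thus offset 13: the TCP header byte holding the CWR/ECE/URG/ACK/
// PSH/RST/SYN/FIN bits (byte 12 holds the data offset nibble plus reserved bits),
// so a 1-byte read at this offset sees all the flags we care about.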
#define TCP_FLAG8_OFF (TCP_FLAG32_OFF + 1)

// For maps netd does not need to access
#define DEFINE_BPF_MAP_NO_NETD(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries) \
    DEFINE_BPF_MAP_EXT(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries,         \
                       AID_ROOT, AID_NET_BW_ACCT, 0060, "fs_bpf_net_shared", "",   \
                       PRIVATE, BPFLOADER_MIN_VER, BPFLOADER_MAX_VER,              \
                       LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)

// For maps netd only needs read-only access to
#define DEFINE_BPF_MAP_RO_NETD(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries)  \
    DEFINE_BPF_MAP_EXT(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries,          \
                       AID_ROOT, AID_NET_BW_ACCT, 0460, "fs_bpf_netd_readonly", "", \
                       PRIVATE, BPFLOADER_MIN_VER, BPFLOADER_MAX_VER,               \
                       LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)

// For maps netd needs to be able to read and write
#define DEFINE_BPF_MAP_RW_NETD(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries) \
    DEFINE_BPF_MAP_UGM(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, \
                       AID_ROOT, AID_NET_BW_ACCT, 0660)
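
// Note: the modes above are standard octal permission bits on the pinned map files:
// 0060 grants the group (AID_NET_BW_ACCT) read+write only, 0460 additionally grants
// the owner (AID_ROOT) read, and 0660 grants owner and group read+write. On top of
// this, the "fs_bpf_*" strings select the SELinux context of the pin, which controls
// which domains may open it at all.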

// Bpf map arrays on creation are preinitialized to 0 and do not support deletion of a key,
// see: kernel/bpf/arraymap.c array_map_delete_elem() returns -EINVAL (from both syscall and ebpf)
// Additionally on newer kernels the bpf jit can optimize out the lookups.
// only valid indexes are [0..CONFIGURATION_MAP_SIZE-1]
DEFINE_BPF_MAP_RO_NETD(configuration_map, ARRAY, uint32_t, uint32_t, CONFIGURATION_MAP_SIZE)

// TODO: consider whether we can merge some of these maps
// for example it might be possible to merge 2 or 3 of:
//   uid_counterset_map + uid_owner_map + uid_permission_map
DEFINE_BPF_MAP_NO_NETD(blocked_ports_map, ARRAY, int, uint64_t,
                       1024 /* 64K ports -> 1024 u64s */)
DEFINE_BPF_MAP_RW_NETD(cookie_tag_map, HASH, uint64_t, UidTagValue, COOKIE_UID_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(uid_counterset_map, HASH, uint32_t, uint8_t, UID_COUNTERSET_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(app_uid_stats_map, HASH, uint32_t, StatsValue, APP_STATS_MAP_SIZE)
DEFINE_BPF_MAP_RO_NETD(stats_map_A, HASH, StatsKey, StatsValue, STATS_MAP_SIZE)
DEFINE_BPF_MAP_RO_NETD(stats_map_B, HASH, StatsKey, StatsValue, STATS_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(iface_stats_map, HASH, uint32_t, StatsValue, IFACE_STATS_MAP_SIZE)
DEFINE_BPF_MAP_RO_NETD(uid_owner_map, HASH, uint32_t, UidOwnerValue, UID_OWNER_MAP_SIZE)
DEFINE_BPF_MAP_RO_NETD(uid_permission_map, HASH, uint32_t, uint8_t, UID_OWNER_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(ingress_discard_map, HASH, IngressDiscardKey, IngressDiscardValue,
                       INGRESS_DISCARD_MAP_SIZE)

DEFINE_BPF_MAP_RW_NETD(lock_array_test_map, ARRAY, uint32_t, bool, 1)
DEFINE_BPF_MAP_RW_NETD(lock_hash_test_map, HASH, uint32_t, bool, 1)

/* never actually used from ebpf */
DEFINE_BPF_MAP_NO_NETD(iface_index_name_map, HASH, uint32_t, IfaceValue, IFACE_INDEX_NAME_MAP_SIZE)

// A single-element configuration array, packet tracing is enabled when 'true'.
DEFINE_BPF_MAP_EXT(packet_trace_enabled_map, ARRAY, uint32_t, bool, 1,
                   AID_ROOT, AID_SYSTEM, 0060, "fs_bpf_net_shared", "", PRIVATE,
                   BPFLOADER_MAINLINE_U_VERSION, BPFLOADER_MAX_VER, LOAD_ON_ENG,
                   LOAD_ON_USER, LOAD_ON_USERDEBUG)

// A ring buffer on which packet information is pushed.
DEFINE_BPF_RINGBUF_EXT(packet_trace_ringbuf, PacketTrace, PACKET_TRACE_BUF_SIZE,
                       AID_ROOT, AID_SYSTEM, 0060, "fs_bpf_net_shared", "", PRIVATE,
                       BPFLOADER_MAINLINE_U_VERSION, BPFLOADER_MAX_VER, LOAD_ON_ENG,
                       LOAD_ON_USER, LOAD_ON_USERDEBUG);

DEFINE_BPF_MAP_RO_NETD(data_saver_enabled_map, ARRAY, uint32_t, bool,
                       DATA_SAVER_ENABLED_MAP_SIZE)

// iptables xt_bpf programs need to be usable by both netd and netutils_wrappers
// selinux contexts, because even non-xt_bpf iptables mutations are implemented as
// a full table dump, followed by an update in userspace, and then a reload into the kernel,
// where any already in-use xt_bpf matchers are serialized as the path to the pinned
// program (see XT_BPF_MODE_PATH_PINNED) and then the iptables binary (or rather
// the kernel acting on behalf of it) must be able to retrieve the pinned program
// for the reload to succeed
#define DEFINE_XTBPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
    DEFINE_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog)

// programs that need to be usable by netd, but not by netutils_wrappers
// (this is because these are currently attached by the mainline provided libnetd_updatable .so
// which is loaded into netd and thus runs as netd uid/gid/selinux context)
#define DEFINE_NETD_BPF_PROG_KVER_RANGE(SECTION_NAME, prog_uid, prog_gid, the_prog, minKV, maxKV) \
    DEFINE_BPF_PROG_EXT(SECTION_NAME, prog_uid, prog_gid, the_prog,                               \
                        minKV, maxKV, BPFLOADER_MIN_VER, BPFLOADER_MAX_VER, MANDATORY,            \
                        "fs_bpf_netd_readonly", "", LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)

#define DEFINE_NETD_BPF_PROG_KVER(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv) \
    DEFINE_NETD_BPF_PROG_KVER_RANGE(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, KVER_INF)

#define DEFINE_NETD_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
    DEFINE_NETD_BPF_PROG_KVER(SECTION_NAME, prog_uid, prog_gid, the_prog, KVER_NONE)

#define DEFINE_NETD_V_BPF_PROG_KVER(SECTION_NAME, prog_uid, prog_gid, the_prog, minKV)            \
    DEFINE_BPF_PROG_EXT(SECTION_NAME, prog_uid, prog_gid, the_prog, minKV,                        \
                        KVER_INF, BPFLOADER_MAINLINE_V_VERSION, BPFLOADER_MAX_VER, MANDATORY,     \
                        "fs_bpf_netd_readonly", "", LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)

// programs that only need to be usable by the system server
#define DEFINE_SYS_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
    DEFINE_BPF_PROG_EXT(SECTION_NAME, prog_uid, prog_gid, the_prog, KVER_NONE, KVER_INF,  \
                        BPFLOADER_MIN_VER, BPFLOADER_MAX_VER, MANDATORY, \
                        "fs_bpf_net_shared", "", LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)

/*
 * Note: this blindly assumes an MTU of 1500, and that packets > MTU are always TCP,
 * and that TCP is using the Linux default settings with TCP timestamp option enabled
 * which uses 12 TCP option bytes per frame.
 *
 * These are not unreasonable assumptions:
 *
 * The internet does not really support MTUs greater than 1500, so most TCP traffic will
 * be at that MTU, or slightly below it (worst case our upwards adjustment is too small).
 *
 * The chance our traffic isn't IP at all is basically zero, so the IP overhead correction
 * is bound to be needed.
 *
 * Furthermore, the likelihood that we're having to deal with GSO (ie. > MTU) packets that
 * are not IP/TCP is pretty small (few other things are supported by Linux) and worst case
 * our extra overhead will be slightly off, but probably still better than assuming none.
 *
 * Most servers are also Linux and thus support/default to using TCP timestamp option
 * (and indeed the TCP timestamp option comes from RFC 1323 titled "TCP Extensions for High
 * Performance", which also defined TCP window scaling, and is thus absolutely ancient...).
 *
 * Altogether this should be more correct than if we simply ignored GSO frames
 * (ie. counted them as single packets with no extra overhead)
 *
 * Especially since the number of packets is important for any future clat offload correction.
 * (which adjusts upward by 20 bytes per packet to account for ipv4 -> ipv6 header conversion)
 */
#define DEFINE_UPDATE_STATS(the_stats_map, TypeOfKey)                                            \
    static __always_inline inline void update_##the_stats_map(const struct __sk_buff* const skb, \
                                                              const TypeOfKey* const key,        \
                                                              const struct egress_bool egress,   \
                                                     __unused const struct kver_uint kver) {     \
        StatsValue* value = bpf_##the_stats_map##_lookup_elem(key);                              \
        if (!value) {                                                                            \
            StatsValue newValue = {};                                                            \
            bpf_##the_stats_map##_update_elem(key, &newValue, BPF_NOEXIST);                      \
            value = bpf_##the_stats_map##_lookup_elem(key);                                      \
        }                                                                                        \
        if (value) {                                                                             \
            const int mtu = 1500;                                                                \
            uint64_t packets = 1;                                                                \
            uint64_t bytes = skb->len;                                                           \
            if (bytes > mtu) {                                                                   \
                bool is_ipv6 = (skb->protocol == htons(ETH_P_IPV6));                             \
                int ip_overhead = (is_ipv6 ? sizeof(struct ipv6hdr) : sizeof(struct iphdr));     \
                int tcp_overhead = ip_overhead + sizeof(struct tcphdr) + 12;                     \
                int mss = mtu - tcp_overhead;                                                    \
                uint64_t payload = bytes - tcp_overhead;                                         \
                packets = (payload + mss - 1) / mss;                                             \
                bytes = tcp_overhead * packets + payload;                                        \
            }                                                                                    \
            if (egress.egress) {                                                                 \
                __sync_fetch_and_add(&value->txPackets, packets);                                \
                __sync_fetch_and_add(&value->txBytes, bytes);                                    \
            } else {                                                                             \
                __sync_fetch_and_add(&value->rxPackets, packets);                                \
                __sync_fetch_and_add(&value->rxBytes, bytes);                                    \
            }                                                                                    \
        }                                                                                        \
    }
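
// Worked example of the GSO math above (IPv4 case): tcp_overhead = 20 (iphdr)
// + 20 (tcphdr) + 12 (timestamp option) = 52, hence mss = 1500 - 52 = 1448.
// A 4328 byte GSO skb thus carries payload = 4328 - 52 = 4276 bytes, and is
// counted as packets = ceil(4276 / 1448) = 3 and bytes = 3 * 52 + 4276 = 4432
// on-wire (sans L2) bytes.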

DEFINE_UPDATE_STATS(app_uid_stats_map, uint32_t)
DEFINE_UPDATE_STATS(iface_stats_map, uint32_t)
DEFINE_UPDATE_STATS(stats_map_A, StatsKey)
DEFINE_UPDATE_STATS(stats_map_B, StatsKey)

// both of these return 0 on success or -EFAULT on failure (and zero out the buffer)
static __always_inline inline int bpf_skb_load_bytes_net(const struct __sk_buff* const skb,
                                                         const int L3_off,
                                                         void* const to,
                                                         const int len,
                                                         const struct kver_uint kver) {
    // 'kver' (here and throughout) is the compile time guaranteed minimum kernel version,
    // ie. we're building (a version of) the bpf program for kver (or newer!) kernels.
    //
    // 4.19+ kernels support the 'bpf_skb_load_bytes_relative()' bpf helper function,
    // so we can use it.  On pre-4.19 kernels we cannot use the relative load helper,
    // and thus will simply get things wrong if there's any L2 (ethernet) header in the skb.
    //
    // Luckily, for cellular traffic, there likely isn't any, as cell is usually 'rawip'.
    //
    // However, this does mean that wifi (and ethernet) on 4.14 is basically a lost cause:
    // we'll be making decisions based on the *wrong* bytes (fetched from the wrong offset),
    // because the 'L3_off' passed to bpf_skb_load_bytes() should be increased by l2_header_size,
    // which for ethernet is 14 and not 0 like it is for rawip.
    //
    // For similar reasons this will fail with non-offloaded VLAN tags on < 4.19 kernels,
    // since those extend the ethernet header from 14 to 18 bytes.
    return KVER_IS_AT_LEAST(kver, 4, 19, 0)
        ? bpf_skb_load_bytes_relative(skb, L3_off, to, len, BPF_HDR_START_NET)
        : bpf_skb_load_bytes(skb, L3_off, to, len);
}

static __always_inline inline void do_packet_tracing(
        const struct __sk_buff* const skb, const struct egress_bool egress, const uint32_t uid,
        const uint32_t tag, const bool enable_tracing, const struct kver_uint kver) {
    if (!enable_tracing) return;
    if (!KVER_IS_AT_LEAST(kver, 5, 8, 0)) return;

    uint32_t mapKey = 0;
    bool* traceConfig = bpf_packet_trace_enabled_map_lookup_elem(&mapKey);
    if (traceConfig == NULL) return;
    if (*traceConfig == false) return;

    PacketTrace* pkt = bpf_packet_trace_ringbuf_reserve();
    if (pkt == NULL) return;

    // Errors from bpf_skb_load_bytes_net are ignored to favor returning something
    // over returning nothing. In the event of an error, the kernel will fill in
    // zero for the destination memory. Do not change the default '= 0' below.

    uint8_t proto = 0;
    uint8_t L4_off = 0;
    uint8_t ipVersion = 0;
    if (skb->protocol == htons(ETH_P_IP)) {
        (void)bpf_skb_load_bytes_net(skb, IP4_OFFSET(protocol), &proto, sizeof(proto), kver);
        (void)bpf_skb_load_bytes_net(skb, IPPROTO_IHL_OFF, &L4_off, sizeof(L4_off), kver);
        L4_off = (L4_off & 0x0F) * 4;  // IHL calculation.
        ipVersion = 4;
    } else if (skb->protocol == htons(ETH_P_IPV6)) {
        (void)bpf_skb_load_bytes_net(skb, IP6_OFFSET(nexthdr), &proto, sizeof(proto), kver);
        L4_off = sizeof(struct ipv6hdr);
        ipVersion = 6;
        // skip over a *single* HOPOPTS or DSTOPTS extension header (if present)
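        // (the extension header 'len' field counts 8-byte units beyond the first
        //  8 bytes, hence the (ext_hdr.len + 1) * 8 advance below)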
        if (proto == IPPROTO_HOPOPTS || proto == IPPROTO_DSTOPTS) {
            struct {
                uint8_t proto, len;
            } ext_hdr;
            if (!bpf_skb_load_bytes_net(skb, L4_off, &ext_hdr, sizeof(ext_hdr), kver)) {
                proto = ext_hdr.proto;
                L4_off += (ext_hdr.len + 1) * 8;
            }
        }
    }

    uint8_t flags = 0;
    __be16 sport = 0, dport = 0;
    if (L4_off >= 20) {
      switch (proto) {
        case IPPROTO_TCP:
          (void)bpf_skb_load_bytes_net(skb, L4_off + TCP_FLAG8_OFF, &flags, sizeof(flags), kver);
          // fallthrough
        case IPPROTO_DCCP:
        case IPPROTO_UDP:
        case IPPROTO_UDPLITE:
        case IPPROTO_SCTP:
          // all of these L4 protocols start with be16 src & dst port
          (void)bpf_skb_load_bytes_net(skb, L4_off + 0, &sport, sizeof(sport), kver);
          (void)bpf_skb_load_bytes_net(skb, L4_off + 2, &dport, sizeof(dport), kver);
          break;
        case IPPROTO_ICMP:
        case IPPROTO_ICMPV6:
          // Both IPv4 and IPv6 icmp start with u8 type & code, which we store in the bottom
          // (ie. second) byte of sport/dport (which are be16s), the top byte is already zero.
          (void)bpf_skb_load_bytes_net(skb, L4_off + 0, (char *)&sport + 1, 1, kver); //type
          (void)bpf_skb_load_bytes_net(skb, L4_off + 1, (char *)&dport + 1, 1, kver); //code
          break;
      }
    }

    pkt->timestampNs = bpf_ktime_get_boot_ns();
    pkt->ifindex = skb->ifindex;
    pkt->length = skb->len;

    pkt->uid = uid;
    pkt->tag = tag;
    pkt->sport = sport;
    pkt->dport = dport;

    pkt->egress = egress.egress;
    pkt->wakeup = !egress.egress && (skb->mark & 0x80000000);  // Fwmark.ingress_cpu_wakeup
    pkt->ipProto = proto;
    pkt->tcpFlags = flags;
    pkt->ipVersion = ipVersion;

    bpf_packet_trace_ringbuf_submit(pkt);
}

static __always_inline inline bool skip_owner_match(struct __sk_buff* skb,
                                                    const struct egress_bool egress,
                                                    const struct kver_uint kver) {
    uint32_t flag = 0;
    if (skb->protocol == htons(ETH_P_IP)) {
        uint8_t proto;
        // no need to check for success, proto will be zeroed if bpf_skb_load_bytes_net() fails
        (void)bpf_skb_load_bytes_net(skb, IP4_OFFSET(protocol), &proto, sizeof(proto), kver);
        if (proto == IPPROTO_ESP) return true;
        if (proto != IPPROTO_TCP) return false;  // handles read failure above
        uint8_t ihl;
        // we don't check for success, as this cannot fail, as it is earlier in the packet than
        // proto, the reading of which must have succeeded, additionally the next read
        // (a little bit deeper in the packet in spite of ihl being zeroed) of the tcp flags
        // field will also fail, and that failure we already handle correctly
        // (we also don't check that ihl in [0x45,0x4F] nor that ipv4 header checksum is correct)
        (void)bpf_skb_load_bytes_net(skb, IPPROTO_IHL_OFF, &ihl, sizeof(ihl), kver);
        // if the read below fails, we'll just assume no TCP flags are set, which is fine.
        (void)bpf_skb_load_bytes_net(skb, (ihl & 0xF) * 4 + TCP_FLAG32_OFF,
                                     &flag, sizeof(flag), kver);
    } else if (skb->protocol == htons(ETH_P_IPV6)) {
        uint8_t proto;
        // no need to check for success, proto will be zeroed if bpf_skb_load_bytes_net() fails
        (void)bpf_skb_load_bytes_net(skb, IP6_OFFSET(nexthdr), &proto, sizeof(proto), kver);
        if (proto == IPPROTO_ESP) return true;
        if (proto != IPPROTO_TCP) return false;  // handles read failure above
        // if the read below fails, we'll just assume no TCP flags are set, which is fine.
        (void)bpf_skb_load_bytes_net(skb, sizeof(struct ipv6hdr) + TCP_FLAG32_OFF,
                                     &flag, sizeof(flag), kver);
    } else {
        return false;
    }
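    // Note: 'flag' was read raw off the wire, in network byte order, which is exactly
    // how the TCP_FLAG_* constants are encoded (htonl, see the comment above
    // TCP_FLAG32_OFF), so the bitwise test below needs no byte swapping.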
    // Always allow RST's, and additionally allow ingress FINs
    return flag & (TCP_FLAG_RST | (egress.egress ? 0 : TCP_FLAG_FIN));  // false on read failure
}

static __always_inline inline BpfConfig getConfig(uint32_t configKey) {
    uint32_t mapSettingKey = configKey;
    BpfConfig* config = bpf_configuration_map_lookup_elem(&mapSettingKey);
    if (!config) {
        // Couldn't read configuration entry. Assume everything is disabled.
        return DEFAULT_CONFIG;
    }
    return *config;
}

static __always_inline inline bool ingress_should_discard(struct __sk_buff* skb,
                                                          const struct kver_uint kver) {
    // Require 4.19, since earlier kernels don't have bpf_skb_load_bytes_relative() which
    // provides reads relative to the L3 header.  Without that we could fetch the wrong bytes.
    // Additionally earlier bpf verifiers are much harder to please.
    if (!KVER_IS_AT_LEAST(kver, 4, 19, 0)) return false;

    IngressDiscardKey k = {};
    if (skb->protocol == htons(ETH_P_IP)) {
        k.daddr.s6_addr32[2] = htonl(0xFFFF);
        (void)bpf_skb_load_bytes_net(skb, IP4_OFFSET(daddr), &k.daddr.s6_addr32[3], 4, kver);
    } else if (skb->protocol == htons(ETH_P_IPV6)) {
        (void)bpf_skb_load_bytes_net(skb, IP6_OFFSET(daddr), &k.daddr, sizeof(k.daddr), kver);
    } else {
        return false; // non IPv4/IPv6, so no IP to match on
    }

    // we didn't check for load success, because destination bytes will be zeroed if
    // bpf_skb_load_bytes_net() fails, instead we rely on daddr of '::' and '::ffff:0.0.0.0'
    // never being present in the map itself

    IngressDiscardValue* v = bpf_ingress_discard_map_lookup_elem(&k);
    if (!v) return false;  // lookup failure -> no protection in place -> allow
    // if (skb->ifindex == 1) return false;  // allow 'lo', but can't happen - see callsite
    if (skb->ifindex == v->iif[0]) return false;  // allowed interface
    if (skb->ifindex == v->iif[1]) return false;  // allowed interface
    return true;  // disallowed interface
}

static __always_inline inline int bpf_owner_match(struct __sk_buff* skb, uint32_t uid,
                                                  const struct egress_bool egress,
                                                  const struct kver_uint kver) {
    if (is_system_uid(uid)) return PASS;

    if (skip_owner_match(skb, egress, kver)) return PASS;

    BpfConfig enabledRules = getConfig(UID_RULES_CONFIGURATION_KEY);

    // BACKGROUND match does not apply to loopback traffic
    if (skb->ifindex == 1) enabledRules &= ~BACKGROUND_MATCH;

    UidOwnerValue* uidEntry = bpf_uid_owner_map_lookup_elem(&uid);
    uint32_t uidRules = uidEntry ? uidEntry->rule : 0;
    uint32_t allowed_iif = uidEntry ? uidEntry->iif : 0;

    if (isBlockedByUidRules(enabledRules, uidRules)) return DROP;

    if (!egress.egress && skb->ifindex != 1) {
        if (ingress_should_discard(skb, kver)) return DROP;
        if (uidRules & IIF_MATCH) {
            if (allowed_iif && skb->ifindex != allowed_iif) {
                // Drop packets coming from neither lo nor the allowed interface
                // (allowed_iif == 0 is a wildcard and never drops).
                return DROP_UNLESS_DNS;
            }
        } else if (uidRules & LOCKDOWN_VPN_MATCH) {
            // Drop packets not coming from lo when the rule has LOCKDOWN_VPN_MATCH
            // but not IIF_MATCH.
            return DROP_UNLESS_DNS;
        }
    }
    return PASS;
}

static __always_inline inline void update_stats_with_config(const uint32_t selectedMap,
                                                            const struct __sk_buff* const skb,
                                                            const StatsKey* const key,
                                                            const struct egress_bool egress,
                                                            const struct kver_uint kver) {
    if (selectedMap == SELECT_MAP_A) {
        update_stats_map_A(skb, key, egress, kver);
    } else {
        update_stats_map_B(skb, key, egress, kver);
    }
}

static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb,
                                                      const struct egress_bool egress,
                                                      const bool enable_tracing,
                                                      const struct kver_uint kver) {
    // sock_uid will be 'overflowuid' if !sk_fullsock(sk_to_full_sk(skb->sk))
    uint32_t sock_uid = bpf_get_socket_uid(skb);

    // kernel's DEFAULT_OVERFLOWUID is 65534, this is the overflow 'nobody' uid,
    // usually this being returned means that skb->sk is NULL during RX
    // (early decap socket lookup failure), which commonly happens for incoming
    // packets to an unconnected udp socket.
    // But it can also happen for egress from a timewait socket.
    // Let's treat such cases as 'root', which is_system_uid() matches.
    if (sock_uid == 65534) sock_uid = 0;

    uint64_t cookie = bpf_get_socket_cookie(skb);  // 0 iff !skb->sk
    UidTagValue* utag = bpf_cookie_tag_map_lookup_elem(&cookie);
    uint32_t uid, tag;
    if (utag) {
        uid = utag->uid;
        tag = utag->tag;
    } else {
        uid = sock_uid;
        tag = 0;
    }

    // Always allow and never count clat traffic. Only the IPv4 traffic on the stacked
    // interface is accounted for and subject to usage restrictions.
    // CLAT IPv6 TX sockets are *always* tagged with CLAT uid, see tagSocketAsClat()
    // CLAT daemon receives via an untagged AF_PACKET socket.
    if (egress.egress && uid == AID_CLAT) return PASS;

    int match = bpf_owner_match(skb, sock_uid, egress, kver);

// Workaround for secureVPN with VpnIsolation enabled, refer to b/159994981 for details.
// Keep TAG_SYSTEM_DNS in sync with DnsResolver/include/netd_resolv/resolv.h
// and TrafficStatsConstants.java
#define TAG_SYSTEM_DNS 0xFFFFFF82
    if (tag == TAG_SYSTEM_DNS && uid == AID_DNS) {
        uid = sock_uid;
        if (match == DROP_UNLESS_DNS) match = PASS;
    } else {
        if (match == DROP_UNLESS_DNS) match = DROP;
    }

    // If an outbound packet is going to be dropped, we do not count that traffic.
    if (egress.egress && (match == DROP)) return DROP;

    StatsKey key = {.uid = uid, .tag = tag, .counterSet = 0, .ifaceIndex = skb->ifindex};

    uint8_t* counterSet = bpf_uid_counterset_map_lookup_elem(&uid);
    if (counterSet) key.counterSet = (uint32_t)*counterSet;

    uint32_t mapSettingKey = CURRENT_STATS_MAP_CONFIGURATION_KEY;
    uint32_t* selectedMap = bpf_configuration_map_lookup_elem(&mapSettingKey);

    if (!selectedMap) return PASS;  // cannot happen, needed to keep bpf verifier happy

    do_packet_tracing(skb, egress, uid, tag, enable_tracing, kver);
    update_stats_with_config(*selectedMap, skb, &key, egress, kver);
    update_app_uid_stats_map(skb, &uid, egress, kver);

    // We've already handled DROP_UNLESS_DNS up above, thus when we reach here the only
    // possible values of match are DROP(0) or PASS(1), however we need to use
    // "match &= 1" before 'return match' to help the kernel's bpf verifier,
    // so that it can be 100% certain that the returned value is always 0 or 1.
    // We use assembly so that it cannot be optimized out by a too smart compiler.
    asm("%0 &= 1" : "+r"(match));
    return match;
}

// Tracing on Android U+ 5.8+
DEFINE_BPF_PROG_EXT("cgroupskb/ingress/stats$trace", AID_ROOT, AID_SYSTEM,
                    bpf_cgroup_ingress_trace, KVER_5_8, KVER_INF,
                    BPFLOADER_MAINLINE_U_VERSION, BPFLOADER_MAX_VER, MANDATORY,
                    "fs_bpf_netd_readonly", "",
                    LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, INGRESS, TRACE_ON, KVER_5_8);
}

DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/ingress/stats$4_19", AID_ROOT, AID_SYSTEM,
                                bpf_cgroup_ingress_4_19, KVER_4_19, KVER_INF)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, INGRESS, TRACE_OFF, KVER_4_19);
}

DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/ingress/stats$4_14", AID_ROOT, AID_SYSTEM,
                                bpf_cgroup_ingress_4_14, KVER_NONE, KVER_4_19)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, INGRESS, TRACE_OFF, KVER_NONE);
}

// Tracing on Android U+ 5.8+
DEFINE_BPF_PROG_EXT("cgroupskb/egress/stats$trace", AID_ROOT, AID_SYSTEM,
                    bpf_cgroup_egress_trace, KVER_5_8, KVER_INF,
                    BPFLOADER_MAINLINE_U_VERSION, BPFLOADER_MAX_VER, MANDATORY,
                    "fs_bpf_netd_readonly", "",
                    LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, EGRESS, TRACE_ON, KVER_5_8);
}

DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/egress/stats$4_19", AID_ROOT, AID_SYSTEM,
                                bpf_cgroup_egress_4_19, KVER_4_19, KVER_INF)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, EGRESS, TRACE_OFF, KVER_4_19);
}

DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/egress/stats$4_14", AID_ROOT, AID_SYSTEM,
                                bpf_cgroup_egress_4_14, KVER_NONE, KVER_4_19)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, EGRESS, TRACE_OFF, KVER_NONE);
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_XTBPF_PROG("skfilter/egress/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_egress_prog)
(struct __sk_buff* skb) {
    // Clat daemon does not generate new traffic, all its traffic is accounted for already
    // on the v4-* interfaces (except for the 20 (or 28) extra bytes of IPv6 vs IPv4 overhead,
    // but that can be corrected for later when merging v4-foo stats into interface foo's).
    // CLAT sockets are created by system server and tagged as uid CLAT, see tagSocketAsClat()
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    if (sock_uid == AID_SYSTEM) {
        uint64_t cookie = bpf_get_socket_cookie(skb);
        UidTagValue* utag = bpf_cookie_tag_map_lookup_elem(&cookie);
        if (utag && utag->uid == AID_CLAT) return XTBPF_NOMATCH;
    }

    uint32_t key = skb->ifindex;
    update_iface_stats_map(skb, &key, EGRESS, KVER_NONE);
    return XTBPF_MATCH;
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_XTBPF_PROG("skfilter/ingress/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_ingress_prog)
(struct __sk_buff* skb) {
    // Clat daemon traffic is not accounted by virtue of iptables raw prerouting drop rule
    // (in clat_raw_PREROUTING chain), which triggers before this (in bw_raw_PREROUTING chain).
    // It will be accounted for on the v4-* clat interface instead.
    // Keep that in mind when moving this out of iptables xt_bpf and into tc ingress (or xdp).

    uint32_t key = skb->ifindex;
    update_iface_stats_map(skb, &key, INGRESS, KVER_NONE);
    return XTBPF_MATCH;
}

DEFINE_SYS_BPF_PROG("schedact/ingress/account", AID_ROOT, AID_NET_ADMIN,
                    tc_bpf_ingress_account_prog)
(struct __sk_buff* skb) {
    if (is_received_skb(skb)) {
        // Account for ingress traffic before tc drops it.
        uint32_t key = skb->ifindex;
        update_iface_stats_map(skb, &key, INGRESS, KVER_NONE);
    }
    return TC_ACT_UNSPEC;
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_XTBPF_PROG("skfilter/allowlist/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_allowlist_prog)
(struct __sk_buff* skb) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    if (is_system_uid(sock_uid)) return XTBPF_MATCH;

    // kernel's DEFAULT_OVERFLOWUID is 65534, this is the overflow 'nobody' uid,
    // usually this being returned means that skb->sk is NULL during RX
    // (early decap socket lookup failure), which commonly happens for incoming
    // packets to an unconnected udp socket.
    // But it can also happen for egress from a timewait socket.
    // Let's treat such cases as 'root', which is_system_uid() matches.
    if (sock_uid == 65534) return XTBPF_MATCH;

    UidOwnerValue* allowlistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
    if (allowlistMatch) return allowlistMatch->rule & HAPPY_BOX_MATCH ? XTBPF_MATCH : XTBPF_NOMATCH;
    return XTBPF_NOMATCH;
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_XTBPF_PROG("skfilter/denylist/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_denylist_prog)
(struct __sk_buff* skb) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    UidOwnerValue* denylistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
    uint32_t penalty_box = PENALTY_BOX_USER_MATCH | PENALTY_BOX_ADMIN_MATCH;
    if (denylistMatch) return denylistMatch->rule & penalty_box ? XTBPF_MATCH : XTBPF_NOMATCH;
    return XTBPF_NOMATCH;
}

static __always_inline inline uint8_t get_app_permissions() {
    uint64_t gid_uid = bpf_get_current_uid_gid();
    /*
     * A given app is guaranteed to have the same app ID in all the profiles in
     * which it is installed, and install permissions are granted to an app for
     * all users at install time, so we only check the appId part of the request
     * uid at run time. See UserHandle#isSameApp for details.
     */
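    // e.g. uid 1010123 (user 10, app 10123) and uid 10123 (user 0, same app)
    // both reduce to appId = uid % 100000 = 10123, sharing one permission entry.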
    uint32_t appId = (gid_uid & 0xffffffff) % AID_USER_OFFSET;  // == PER_USER_RANGE == 100000
    uint8_t* permissions = bpf_uid_permission_map_lookup_elem(&appId);
    // if UID not in map, then default to just INTERNET permission.
    return permissions ? *permissions : BPF_PERMISSION_INTERNET;
}

DEFINE_NETD_BPF_PROG_KVER("cgroupsock/inet_create", AID_ROOT, AID_ROOT, inet_socket_create,
                          KVER_4_14)
(__unused struct bpf_sock* sk) {
    return (get_app_permissions() & BPF_PERMISSION_INTERNET) ? BPF_ALLOW : BPF_DISALLOW;
}

DEFINE_NETD_BPF_PROG_KVER("cgroupsockrelease/inet_release", AID_ROOT, AID_ROOT,
                          inet_socket_release, KVER_5_10)
(struct bpf_sock* sk) {
    uint64_t cookie = bpf_get_sk_cookie(sk);
    if (cookie) bpf_cookie_tag_map_delete_elem(&cookie);

    return 1;
}

static __always_inline inline int check_localhost(__unused struct bpf_sock_addr *ctx) {
    // See include/uapi/linux/bpf.h:
    //
    // struct bpf_sock_addr {
    //   __u32 user_family;	//     R: 4 byte
    //   __u32 user_ip4;	// BE, R: 1,2,4-byte,   W: 4-byte
    //   __u32 user_ip6[4];	// BE, R: 1,2,4,8-byte, W: 4,8-byte
    //   __u32 user_port;	// BE, R: 1,2,4-byte,   W: 4-byte
    //   __u32 family;		//     R: 4 byte
    //   __u32 type;		//     R: 4 byte
    //   __u32 protocol;	//     R: 4 byte
    //   __u32 msg_src_ip4;	// BE, R: 1,2,4-byte,   W: 4-byte
    //   __u32 msg_src_ip6[4];	// BE, R: 1,2,4,8-byte, W: 4,8-byte
    //   __bpf_md_ptr(struct bpf_sock *, sk);
    // };
    return BPF_ALLOW;
}

static inline __always_inline int block_port(struct bpf_sock_addr *ctx) {
    if (!ctx->user_port) return BPF_ALLOW;

    switch (ctx->protocol) {
        case IPPROTO_TCP:
        case IPPROTO_MPTCP:
        case IPPROTO_UDP:
        case IPPROTO_UDPLITE:
        case IPPROTO_DCCP:
        case IPPROTO_SCTP:
            break;
        default:
            return BPF_ALLOW; // unknown protocols are allowed
    }

    int key = ctx->user_port >> 6;
    int shift = ctx->user_port & 63;

    uint64_t *val = bpf_blocked_ports_map_lookup_elem(&key);
    // Lookup should never fail in reality, but if it does return here to keep the
    // BPF verifier happy.
    if (!val) return BPF_ALLOW;

    if ((*val >> shift) & 1) return BPF_DISALLOW;
    return BPF_ALLOW;
}
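
// The blocked_ports_map is a bitmap over all 2^16 possible user_port values: 1024 u64
// words of 64 bits each. For a user_port value of 5000, for instance, key = 5000 >> 6
// = 78 selects the word and shift = 5000 & 63 = 8 selects the bit within it. (Note
// that user_port is big-endian, per the bpf_sock_addr layout above, so for lookups to
// match, the userspace writer must populate the bitmap using the same encoding.)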

DEFINE_NETD_BPF_PROG_KVER("bind4/inet4_bind", AID_ROOT, AID_ROOT, inet4_bind, KVER_4_19)
(struct bpf_sock_addr *ctx) {
    return block_port(ctx);
}

DEFINE_NETD_BPF_PROG_KVER("bind6/inet6_bind", AID_ROOT, AID_ROOT, inet6_bind, KVER_4_19)
(struct bpf_sock_addr *ctx) {
    return block_port(ctx);
}

DEFINE_NETD_V_BPF_PROG_KVER("connect4/inet4_connect", AID_ROOT, AID_ROOT, inet4_connect, KVER_4_19)
(struct bpf_sock_addr *ctx) {
    return check_localhost(ctx);
}

DEFINE_NETD_V_BPF_PROG_KVER("connect6/inet6_connect", AID_ROOT, AID_ROOT, inet6_connect, KVER_4_19)
(struct bpf_sock_addr *ctx) {
    return check_localhost(ctx);
}

DEFINE_NETD_V_BPF_PROG_KVER("recvmsg4/udp4_recvmsg", AID_ROOT, AID_ROOT, udp4_recvmsg, KVER_4_19)
(struct bpf_sock_addr *ctx) {
    return check_localhost(ctx);
}

DEFINE_NETD_V_BPF_PROG_KVER("recvmsg6/udp6_recvmsg", AID_ROOT, AID_ROOT, udp6_recvmsg, KVER_4_19)
(struct bpf_sock_addr *ctx) {
    return check_localhost(ctx);
}

DEFINE_NETD_V_BPF_PROG_KVER("sendmsg4/udp4_sendmsg", AID_ROOT, AID_ROOT, udp4_sendmsg, KVER_4_19)
(struct bpf_sock_addr *ctx) {
    return check_localhost(ctx);
}

DEFINE_NETD_V_BPF_PROG_KVER("sendmsg6/udp6_sendmsg", AID_ROOT, AID_ROOT, udp6_sendmsg, KVER_4_19)
(struct bpf_sock_addr *ctx) {
    return check_localhost(ctx);
}

DEFINE_NETD_V_BPF_PROG_KVER("getsockopt/prog", AID_ROOT, AID_ROOT, getsockopt_prog, KVER_5_4)
(struct bpf_sockopt *ctx) {
    // Tell kernel to return 'original' kernel reply (instead of the bpf modified buffer)
    // This is important if the answer is larger than PAGE_SIZE (max size this bpf hook can provide)
    ctx->optlen = 0;
    return BPF_ALLOW;
}

DEFINE_NETD_V_BPF_PROG_KVER("setsockopt/prog", AID_ROOT, AID_ROOT, setsockopt_prog, KVER_5_4)
(struct bpf_sockopt *ctx) {
    // Tell kernel to use/process original buffer provided by userspace.
    // This is important if it is larger than PAGE_SIZE (max size this bpf hook can handle).
    ctx->optlen = 0;
    return BPF_ALLOW;
}

LICENSE("Apache 2.0");
CRITICAL("Connectivity and netd");