1 /*
2  * Copyright (C) 2019 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #pragma once
18 
19 #include <linux/bpf.h>
20 #include <linux/if.h>
21 #include <linux/if_ether.h>
22 #include <linux/if_packet.h>
23 #include <linux/in.h>
24 #include <linux/in6.h>
25 #include <linux/ip.h>
26 #include <linux/ipv6.h>
27 #include <linux/pkt_cls.h>
28 #include <linux/tcp.h>
29 // bionic kernel uapi linux/udp.h header is munged...
30 #define __kernel_udphdr udphdr
31 #include <linux/udp.h>
32 #include <stdbool.h>
33 #include <stdint.h>
34 
35 #include "bpf_helpers.h"
36 
37 // IP flags. (from kernel's include/net/ip.h)
38 #define IP_CE      0x8000  // Flag: "Congestion" (really reserved 'evil bit')
39 #define IP_DF      0x4000  // Flag: "Don't Fragment"
40 #define IP_MF      0x2000  // Flag: "More Fragments"
41 #define IP_OFFSET  0x1FFF  // "Fragment Offset" part
42 
// IPv6 fragmentation extension header. (from kernel's include/net/ipv6.h)
// This is wire format: multi-byte fields are in network byte order.
struct frag_hdr {
    __u8   nexthdr;         // next header type
    __u8   reserved;        // always zero
    __be16 frag_off;        // 13 bit offset, 2 bits zero, 1 bit "More Fragments"
    __be32 identification;  // fragment identification
};
50 
51 // ----- Helper functions for offsets to fields -----
52 
53 // They all assume simple IP packets:
54 //   - no VLAN ethernet tags
55 //   - no IPv4 options (see IPV4_HLEN/TCP4_OFFSET/UDP4_OFFSET)
56 //   - no IPv6 extension headers
57 //   - no TCP options (see TCP_HLEN)
58 
// ETH_HLEN itself comes from <linux/if_ether.h>, hence commented out here:
//#define ETH_HLEN sizeof(struct ethhdr)
#define IP4_HLEN sizeof(struct iphdr)     // 20 bytes - assumes no IPv4 options
#define IP6_HLEN sizeof(struct ipv6hdr)   // 40 bytes - assumes no extension headers
#define TCP_HLEN sizeof(struct tcphdr)    // 20 bytes - assumes no TCP options
#define UDP_HLEN sizeof(struct udphdr)    // 8 bytes

// Offsets from beginning of L4 (TCP/UDP) header
#define TCP_OFFSET(field) offsetof(struct tcphdr, field)
#define UDP_OFFSET(field) offsetof(struct udphdr, field)

// Offsets from beginning of L3 (IPv4) header
#define IP4_OFFSET(field) offsetof(struct iphdr, field)
#define IP4_TCP_OFFSET(field) (IP4_HLEN + TCP_OFFSET(field))
#define IP4_UDP_OFFSET(field) (IP4_HLEN + UDP_OFFSET(field))

// Offsets from beginning of L3 (IPv6) header
#define IP6_OFFSET(field) offsetof(struct ipv6hdr, field)
#define IP6_TCP_OFFSET(field) (IP6_HLEN + TCP_OFFSET(field))
#define IP6_UDP_OFFSET(field) (IP6_HLEN + UDP_OFFSET(field))

// Offsets from beginning of L2 (ie. Ethernet) header (which must be present)
#define ETH_IP4_OFFSET(field) (ETH_HLEN + IP4_OFFSET(field))
#define ETH_IP4_TCP_OFFSET(field) (ETH_HLEN + IP4_TCP_OFFSET(field))
#define ETH_IP4_UDP_OFFSET(field) (ETH_HLEN + IP4_UDP_OFFSET(field))
#define ETH_IP6_OFFSET(field) (ETH_HLEN + IP6_OFFSET(field))
#define ETH_IP6_TCP_OFFSET(field) (ETH_HLEN + IP6_TCP_OFFSET(field))
#define ETH_IP6_UDP_OFFSET(field) (ETH_HLEN + IP6_UDP_OFFSET(field))
86 
// ----- BPF kernel helper stubs -----
//
// Each of the following is a function pointer whose initial "address" is the
// numeric kernel helper id (BPF_FUNC_*); when the program is loaded, calls
// through these pointers become calls to the corresponding in-kernel helpers.

// this returns 0 iff skb->sk is NULL
static uint64_t (*bpf_get_socket_cookie)(struct __sk_buff* skb) = (void*)BPF_FUNC_get_socket_cookie;
static uint64_t (*bpf_get_sk_cookie)(struct bpf_sock* sk) = (void*)BPF_FUNC_get_socket_cookie;

static uint32_t (*bpf_get_socket_uid)(struct __sk_buff* skb) = (void*)BPF_FUNC_get_socket_uid;

// packet data access helpers
static int (*bpf_skb_pull_data)(struct __sk_buff* skb, __u32 len) = (void*)BPF_FUNC_skb_pull_data;

static int (*bpf_skb_load_bytes)(const struct __sk_buff* skb, int off, void* to,
                                 int len) = (void*)BPF_FUNC_skb_load_bytes;

static int (*bpf_skb_load_bytes_relative)(const struct __sk_buff* skb, int off, void* to, int len,
                                          int start_hdr) = (void*)BPF_FUNC_skb_load_bytes_relative;

static int (*bpf_skb_store_bytes)(struct __sk_buff* skb, __u32 offset, const void* from, __u32 len,
                                  __u64 flags) = (void*)BPF_FUNC_skb_store_bytes;

// checksum manipulation helpers
static int64_t (*bpf_csum_diff)(__be32* from, __u32 from_size, __be32* to, __u32 to_size,
                                __wsum seed) = (void*)BPF_FUNC_csum_diff;

static int64_t (*bpf_csum_update)(struct __sk_buff* skb, __wsum csum) = (void*)BPF_FUNC_csum_update;

static int (*bpf_skb_change_proto)(struct __sk_buff* skb, __be16 proto,
                                   __u64 flags) = (void*)BPF_FUNC_skb_change_proto;
static int (*bpf_l3_csum_replace)(struct __sk_buff* skb, __u32 offset, __u64 from, __u64 to,
                                  __u64 flags) = (void*)BPF_FUNC_l3_csum_replace;
static int (*bpf_l4_csum_replace)(struct __sk_buff* skb, __u32 offset, __u64 from, __u64 to,
                                  __u64 flags) = (void*)BPF_FUNC_l4_csum_replace;

// packet redirection helpers
static int (*bpf_redirect)(__u32 ifindex, __u64 flags) = (void*)BPF_FUNC_redirect;
static int (*bpf_redirect_map)(const struct bpf_map_def* map, __u32 key,
                               __u64 flags) = (void*)BPF_FUNC_redirect_map;

// headroom / header-size adjustment helpers
static int (*bpf_skb_change_head)(struct __sk_buff* skb, __u32 head_room,
                                  __u64 flags) = (void*)BPF_FUNC_skb_change_head;
static int (*bpf_skb_adjust_room)(struct __sk_buff* skb, __s32 len_diff, __u32 mode,
                                  __u64 flags) = (void*)BPF_FUNC_skb_adjust_room;
123 
// Android only supports little endian architectures
//
// Host <-> network byte order conversion; the __builtin_constant_p branch
// keeps the macros usable in constant expressions (the ___constant_swab*
// forms come from the kernel's swab headers), while runtime values use the
// single-instruction __builtin_bswap* path.
#define htons(x) (__builtin_constant_p(x) ? ___constant_swab16(x) : __builtin_bswap16(x))
#define htonl(x) (__builtin_constant_p(x) ? ___constant_swab32(x) : __builtin_bswap32(x))
// On little endian, host-to-network and network-to-host are the same swap.
#define ntohs(x) htons(x)
#define ntohl(x) htonl(x)
129 
is_received_skb(struct __sk_buff * skb)130 static inline __always_inline __unused bool is_received_skb(struct __sk_buff* skb) {
131     return skb->pkt_type == PACKET_HOST || skb->pkt_type == PACKET_BROADCAST ||
132            skb->pkt_type == PACKET_MULTICAST;
133 }
134 
135 // try to make the first 'len' header bytes readable/writable via direct packet access
136 // (note: AFAIK there is no way to ask for only direct packet read without also getting write)
// Try to make the first 'len' header bytes readable/writable via direct
// packet access (skb->data .. skb->data_end), clamping the request to the
// packet's actual length.
// (note: AFAIK there is no way to ask for only direct packet read without also getting write)
static inline __always_inline void try_make_writable(struct __sk_buff* skb, unsigned len) {
    const unsigned want = (len > skb->len) ? skb->len : len;
    if (skb->data_end - skb->data < want) bpf_skb_pull_data(skb, want);
}
141 
142 // anti-compiler-optimizer no-op: explicitly force full calculation of 'v'
143 //
144 // The use for this is to force full calculation of a complex arithmetic (likely binary
145 // bitops) value, and then check the result only once (thus likely reducing the number
146 // of required conditional jump instructions that badly affect bpf verifier runtime)
147 //
148 // The compiler cannot look into the assembly statement, so it doesn't know it does nothing.
149 // Since the statement takes 'v' as both input and output in a register (+r),
150 // the compiler must fully calculate the precise value of 'v' before this,
151 // and must use the (possibly modified) value of 'v' afterwards (thus cannot
152 // do funky optimizations to use partial results from before the asm).
153 //
154 // As this is not flagged 'volatile' this may still be moved out of a loop,
155 // or even entirely optimized out if 'v' is never used afterwards.
156 //
157 // See: https://gcc.gnu.org/onlinedocs/gcc/Extended-Asm.html
158 #define COMPILER_FORCE_CALCULATION(v) asm ("" : "+r" (v))
159 
// Single-field wrapper structs: passing one of these instead of a raw bool
// gives each flag its own distinct type, so the compiler rejects swapped or
// misordered boolean arguments at call sites.

// traffic direction: ingress vs egress
struct egress_bool { bool egress; };
#define INGRESS ((struct egress_bool){ .egress = false })
#define EGRESS ((struct egress_bool){ .egress = true })

// stream direction: upstream vs downstream
struct stream_bool { bool down; };
#define UPSTREAM ((struct stream_bool){ .down = false })
#define DOWNSTREAM ((struct stream_bool){ .down = true })

// L2 framing: ethernet vs raw IP
struct rawip_bool { bool rawip; };
#define ETHER ((struct rawip_bool){ .rawip = false })
#define RAWIP ((struct rawip_bool){ .rawip = true })

// whether to update a timestamp — exact semantics defined at the usage sites
struct updatetime_bool { bool updatetime; };
#define NO_UPDATETIME ((struct updatetime_bool){ .updatetime = false })
#define UPDATETIME ((struct updatetime_bool){ .updatetime = true })

// Return value for xt_bpf (netfilter match extension) programs
static const int XTBPF_NOMATCH = 0;
static const int XTBPF_MATCH = 1;

// NOTE(review): verdict values — presumably for attach points where the
// kernel treats 0 as deny/drop and 1 as allow; confirm at the usage sites.
static const int BPF_DISALLOW = 0;
static const int BPF_ALLOW = 1;
182