/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <bpf_helpers.h>
#include <linux/bpf.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <stdbool.h>
#include <stdint.h>
#include "bpf_net_helpers.h"
#include "netdbpf/bpf_shared.h"

// These return codes are used by the cgroup bpf filter only.
#define BPF_DROP_UNLESS_DNS 2
#define BPF_PASS 1
#define BPF_DROP 0

// These return codes are used by the xt_bpf programs only.
#define BPF_NOMATCH 0
#define BPF_MATCH 1

#define BPF_EGRESS 0
#define BPF_INGRESS 1

#define IP_PROTO_OFF offsetof(struct iphdr, protocol)
#define IPV6_PROTO_OFF offsetof(struct ipv6hdr, nexthdr)
// Offset of the IPv4 version/IHL byte at the start of the IP header.
#define IPPROTO_IHL_OFF 0
// Byte offset of the flags byte within the TCP header.
#define TCP_FLAG_OFF 13
// Bit position of the RST flag within that flags byte.
#define RST_OFFSET 2

DEFINE_BPF_MAP_GRO(cookie_tag_map, HASH, uint64_t, UidTagValue, COOKIE_UID_MAP_SIZE,
                   AID_NET_BW_ACCT)
DEFINE_BPF_MAP_GRO(uid_counterset_map, HASH, uint32_t, uint8_t, UID_COUNTERSET_MAP_SIZE,
                   AID_NET_BW_ACCT)
DEFINE_BPF_MAP_GRO(app_uid_stats_map, HASH, uint32_t, StatsValue, APP_STATS_MAP_SIZE,
                   AID_NET_BW_STATS)
DEFINE_BPF_MAP_GRW(stats_map_A, HASH, StatsKey, StatsValue, STATS_MAP_SIZE, AID_NET_BW_STATS)
DEFINE_BPF_MAP_GRW(stats_map_B, HASH, StatsKey, StatsValue, STATS_MAP_SIZE, AID_NET_BW_STATS)
DEFINE_BPF_MAP_GRO(iface_stats_map, HASH, uint32_t, StatsValue, IFACE_STATS_MAP_SIZE,
                   AID_NET_BW_STATS)
DEFINE_BPF_MAP_GRO(configuration_map, HASH, uint32_t, uint8_t, CONFIGURATION_MAP_SIZE,
                   AID_NET_BW_STATS)
DEFINE_BPF_MAP(uid_owner_map, HASH, uint32_t, UidOwnerValue, UID_OWNER_MAP_SIZE)

/* never actually used from ebpf */
DEFINE_BPF_MAP_GRO(iface_index_name_map, HASH, uint32_t, IfaceValue, IFACE_INDEX_NAME_MAP_SIZE,
                   AID_NET_BW_STATS)

static __always_inline int is_system_uid(uint32_t uid) {
    return (uid <= MAX_SYSTEM_UID) && (uid >= MIN_SYSTEM_UID);
}

/*
 * Note: this blindly assumes an MTU of 1500, that packets > MTU are always TCP,
 * and that TCP is using the Linux default settings with the TCP timestamp option
 * enabled, which uses 12 TCP option bytes per frame.
 *
 * These are not unreasonable assumptions:
 *
 * The internet does not really support MTUs greater than 1500, so most TCP traffic will
 * be at that MTU, or slightly below it (worst case our upwards adjustment is too small).
 *
 * The chance our traffic isn't IP at all is basically zero, so the IP overhead correction
 * is bound to be needed.
 *
 * Furthermore, the likelihood that we're having to deal with GSO (i.e. > MTU) packets that
 * are not IP/TCP is pretty small (few other things are supported by Linux), and in the
 * worst case our extra overhead will be slightly off, but probably still better than
 * assuming none.
 *
 * Most servers are also Linux and thus support/default to using the TCP timestamp option
 * (and indeed the TCP timestamp option comes from RFC 1323, titled "TCP Extensions for High
 * Performance", which also defined TCP window scaling and is thus absolutely ancient...).
 *
 * Altogether this should be more correct than if we simply ignored GSO frames
 * (i.e. counted them as single packets with no extra overhead), especially since the
 * number of packets is important for any future clat offload correction (which adjusts
 * upward by 20 bytes per packet to account for the ipv4 -> ipv6 header conversion).
 */
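/*
 * A worked example of the estimate above (illustrative arithmetic only, not
 * from the original comments): a 64000-byte IPv4 GSO skb has tcp_overhead =
 * 20 (IP) + 20 (TCP) + 12 (timestamp option) = 52 bytes, so mss = 1500 - 52 =
 * 1448 and payload = 64000 - 52 = 63948. That yields packets =
 * ceil(63948 / 1448) = 45 and bytes = 52 * 45 + 63948 = 66288, instead of the
 * 1 packet / 64000 bytes we would record if we ignored GSO.
 */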
#define DEFINE_UPDATE_STATS(the_stats_map, TypeOfKey)                                          \
    static __always_inline inline void update_##the_stats_map(struct __sk_buff* skb,           \
                                                              int direction, TypeOfKey* key) { \
        StatsValue* value = bpf_##the_stats_map##_lookup_elem(key);                            \
        if (!value) {                                                                          \
            StatsValue newValue = {};                                                          \
            bpf_##the_stats_map##_update_elem(key, &newValue, BPF_NOEXIST);                    \
            value = bpf_##the_stats_map##_lookup_elem(key);                                    \
        }                                                                                      \
        if (value) {                                                                           \
            const int mtu = 1500;                                                              \
            uint64_t packets = 1;                                                              \
            uint64_t bytes = skb->len;                                                         \
            if (bytes > mtu) {                                                                 \
                bool is_ipv6 = (skb->protocol == htons(ETH_P_IPV6));                           \
                int ip_overhead = (is_ipv6 ? sizeof(struct ipv6hdr) : sizeof(struct iphdr));   \
                int tcp_overhead = ip_overhead + sizeof(struct tcphdr) + 12;                   \
                int mss = mtu - tcp_overhead;                                                  \
                uint64_t payload = bytes - tcp_overhead;                                       \
                packets = (payload + mss - 1) / mss;                                           \
                bytes = tcp_overhead * packets + payload;                                      \
            }                                                                                  \
            if (direction == BPF_EGRESS) {                                                     \
                __sync_fetch_and_add(&value->txPackets, packets);                              \
                __sync_fetch_and_add(&value->txBytes, bytes);                                  \
            } else if (direction == BPF_INGRESS) {                                             \
                __sync_fetch_and_add(&value->rxPackets, packets);                              \
                __sync_fetch_and_add(&value->rxBytes, bytes);                                  \
            }                                                                                  \
        }                                                                                      \
    }

DEFINE_UPDATE_STATS(app_uid_stats_map, uint32_t)
DEFINE_UPDATE_STATS(iface_stats_map, uint32_t)
DEFINE_UPDATE_STATS(stats_map_A, StatsKey)
DEFINE_UPDATE_STATS(stats_map_B, StatsKey)

static inline bool skip_owner_match(struct __sk_buff* skb) {
    int offset = -1;
    int ret = 0;
    if (skb->protocol == htons(ETH_P_IP)) {
        offset = IP_PROTO_OFF;
        uint8_t proto, ihl;
        uint8_t flag;
        ret = bpf_skb_load_bytes(skb, offset, &proto, 1);
        if (!ret) {
            if (proto == IPPROTO_ESP) {
                return true;
            } else if (proto == IPPROTO_TCP) {
                ret = bpf_skb_load_bytes(skb, IPPROTO_IHL_OFF, &ihl, 1);
                // Bail out rather than compute the flags offset from an uninitialized ihl.
                if (ret) return false;
                ihl = ihl & 0x0F;
                ret = bpf_skb_load_bytes(skb, ihl * 4 + TCP_FLAG_OFF, &flag, 1);
                if (ret == 0 && (flag >> RST_OFFSET & 1)) {
                    return true;
                }
            }
        }
    } else if (skb->protocol == htons(ETH_P_IPV6)) {
        offset = IPV6_PROTO_OFF;
        uint8_t proto;
        ret = bpf_skb_load_bytes(skb, offset, &proto, 1);
        if (!ret) {
            if (proto == IPPROTO_ESP) {
                return true;
            } else if (proto == IPPROTO_TCP) {
                uint8_t flag;
                ret = bpf_skb_load_bytes(skb, sizeof(struct ipv6hdr) + TCP_FLAG_OFF, &flag, 1);
                if (ret == 0 && (flag >> RST_OFFSET & 1)) {
                    return true;
                }
            }
        }
    }
    return false;
}

static __always_inline BpfConfig getConfig(uint32_t configKey) {
    uint32_t mapSettingKey = configKey;
    BpfConfig* config = bpf_configuration_map_lookup_elem(&mapSettingKey);
    if (!config) {
        // Couldn't read configuration entry. Assume everything is disabled.
        return DEFAULT_CONFIG;
    }
    return *config;
}

static inline int bpf_owner_match(struct __sk_buff* skb, uint32_t uid, int direction) {
    if (skip_owner_match(skb)) return BPF_PASS;

    if (is_system_uid(uid)) return BPF_PASS;

    BpfConfig enabledRules = getConfig(UID_RULES_CONFIGURATION_KEY);

    UidOwnerValue* uidEntry = bpf_uid_owner_map_lookup_elem(&uid);
    uint8_t uidRules = uidEntry ? uidEntry->rule : 0;
    uint32_t allowed_iif = uidEntry ? uidEntry->iif : 0;

    if (enabledRules) {
        if ((enabledRules & DOZABLE_MATCH) && !(uidRules & DOZABLE_MATCH)) {
            return BPF_DROP;
        }
        if ((enabledRules & STANDBY_MATCH) && (uidRules & STANDBY_MATCH)) {
            return BPF_DROP;
        }
        if ((enabledRules & POWERSAVE_MATCH) && !(uidRules & POWERSAVE_MATCH)) {
            return BPF_DROP;
        }
    }
    if (direction == BPF_INGRESS && (uidRules & IIF_MATCH)) {
        // Drop packets that arrive neither on lo (ifindex 1) nor on the allowlisted interface.
        if (allowed_iif && skb->ifindex != 1 && skb->ifindex != allowed_iif) {
            return BPF_DROP_UNLESS_DNS;
        }
    }
    return BPF_PASS;
}

static __always_inline inline void update_stats_with_config(struct __sk_buff* skb, int direction,
                                                            StatsKey* key, uint8_t selectedMap) {
    if (selectedMap == SELECT_MAP_A) {
        update_stats_map_A(skb, direction, key);
    } else if (selectedMap == SELECT_MAP_B) {
        update_stats_map_B(skb, direction, key);
    }
}

static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb, int direction) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    // Always allow and never count clat traffic. Only the IPv4 traffic on the stacked
    // interface is accounted for and subject to usage restrictions.
    if (sock_uid == AID_CLAT) {
        return BPF_PASS;
    }

    int match = bpf_owner_match(skb, sock_uid, direction);
    if ((direction == BPF_EGRESS) && (match == BPF_DROP)) {
        // If an outbound packet is going to be dropped, we do not count that
        // traffic.
        return match;
    }

    uint64_t cookie = bpf_get_socket_cookie(skb);
    UidTagValue* utag = bpf_cookie_tag_map_lookup_elem(&cookie);
    uint32_t uid, tag;
    if (utag) {
        uid = utag->uid;
        tag = utag->tag;
    } else {
        uid = sock_uid;
        tag = 0;
    }

// Workaround for secureVPN with VpnIsolation enabled, refer to b/159994981 for details.
// Keep TAG_SYSTEM_DNS in sync with DnsResolver/include/netd_resolv/resolv.h
// and TrafficStatsConstants.java
#define TAG_SYSTEM_DNS 0xFFFFFF82
    if (tag == TAG_SYSTEM_DNS && uid == AID_DNS) {
        uid = sock_uid;
        if (match == BPF_DROP_UNLESS_DNS) match = BPF_PASS;
    } else {
        if (match == BPF_DROP_UNLESS_DNS) match = BPF_DROP;
    }

    StatsKey key = {.uid = uid, .tag = tag, .counterSet = 0, .ifaceIndex = skb->ifindex};

    uint8_t* counterSet = bpf_uid_counterset_map_lookup_elem(&uid);
    if (counterSet) key.counterSet = (uint32_t)*counterSet;

    uint32_t mapSettingKey = CURRENT_STATS_MAP_CONFIGURATION_KEY;
    uint8_t* selectedMap = bpf_configuration_map_lookup_elem(&mapSettingKey);
    if (!selectedMap) {
        return match;
    }

    if (key.tag) {
        update_stats_with_config(skb, direction, &key, *selectedMap);
        key.tag = 0;
    }

    update_stats_with_config(skb, direction, &key, *selectedMap);
    update_app_uid_stats_map(skb, direction, &uid);
    return match;
}

SEC("cgroupskb/ingress/stats")
int bpf_cgroup_ingress(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, BPF_INGRESS);
}

SEC("cgroupskb/egress/stats")
int bpf_cgroup_egress(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, BPF_EGRESS);
}

DEFINE_BPF_PROG("skfilter/egress/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_egress_prog)
(struct __sk_buff* skb) {
    // The clat daemon does not generate new traffic; all its traffic is already accounted
    // for on the v4-* interfaces (except for the 20 extra bytes of IPv6 vs IPv4 overhead,
    // or 28 when an IPv6 fragment header is needed, but that can be corrected for later
    // when merging v4-foo stats into interface foo's).
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    if (sock_uid == AID_CLAT) return BPF_NOMATCH;

    uint32_t key = skb->ifindex;
    update_iface_stats_map(skb, BPF_EGRESS, &key);
    return BPF_MATCH;
}

DEFINE_BPF_PROG("skfilter/ingress/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_ingress_prog)
(struct __sk_buff* skb) {
    // Clat daemon traffic is not accounted for here by virtue of the iptables raw prerouting
    // drop rule (in the clat_raw_PREROUTING chain), which triggers before this program (in
    // the bw_raw_PREROUTING chain). It will be accounted for on the v4-* clat interface
    // instead. Keep that in mind when moving this out of iptables xt_bpf and into tc ingress
    // (or xdp).

    uint32_t key = skb->ifindex;
    update_iface_stats_map(skb, BPF_INGRESS, &key);
    return BPF_MATCH;
}

DEFINE_BPF_PROG("skfilter/allowlist/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_allowlist_prog)
(struct __sk_buff* skb) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    if (is_system_uid(sock_uid)) return BPF_MATCH;

    // 65534 is the overflow 'nobody' uid: getting it back usually means that
    // skb->sk is NULL during RX (early decap socket lookup failure), which
    // commonly happens for incoming packets to an unconnected udp socket.
    // Additionally, bpf_get_socket_cookie() returns 0 if skb->sk is NULL.
    if ((sock_uid == 65534) && !bpf_get_socket_cookie(skb) && is_received_skb(skb))
        return BPF_MATCH;

    UidOwnerValue* allowlistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
    if (allowlistMatch) return allowlistMatch->rule & HAPPY_BOX_MATCH ? BPF_MATCH : BPF_NOMATCH;
    return BPF_NOMATCH;
}

DEFINE_BPF_PROG("skfilter/denylist/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_denylist_prog)
(struct __sk_buff* skb) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    UidOwnerValue* denylistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
    if (denylistMatch) return denylistMatch->rule & PENALTY_BOX_MATCH ? BPF_MATCH : BPF_NOMATCH;
    return BPF_NOMATCH;
}

DEFINE_BPF_MAP(uid_permission_map, HASH, uint32_t, uint8_t, UID_OWNER_MAP_SIZE)

DEFINE_BPF_PROG_KVER("cgroupsock/inet/create", AID_ROOT, AID_ROOT, inet_socket_create,
                     KVER(4, 14, 0))
(struct bpf_sock* sk) {
    uint64_t gid_uid = bpf_get_current_uid_gid();
    /*
     * A given app is guaranteed to have the same app ID in all the profiles in
     * which it is installed, and install permission is granted to the app for
     * all users at install time, so at run time we only check the appId part
     * of the requesting uid. See UserHandle#isSameApp for details.
     */
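    /*
     * Illustrative example (assumes the standard Android layout where
     * PER_USER_RANGE is 100000 and uid = userId * PER_USER_RANGE + appId):
     * uid 1010123, i.e. appId 10123 in profile 10, maps to
     * 1010123 % 100000 = 10123, the same appId that uid 10123 in the
     * owner profile maps to.
     */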
    uint32_t appId = (gid_uid & 0xffffffff) % PER_USER_RANGE;
    uint8_t* permissions = bpf_uid_permission_map_lookup_elem(&appId);
    if (!permissions) {
        // UID not in map. Default to just INTERNET permission.
        return 1;
    }

    // A return value of 1 means allow, everything else means deny.
    return (*permissions & BPF_PERMISSION_INTERNET) == BPF_PERMISSION_INTERNET;
}

LICENSE("Apache 2.0");
CRITICAL("netd");