/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// The resulting .o needs to load on the Android T Beta 3 bpfloader
#define BPFLOADER_MIN_VER BPFLOADER_T_BETA3_VERSION

#include <bpf_helpers.h>
#include <linux/bpf.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>
#include <netdutils/UidConstants.h>
#include <stdbool.h>
#include <stdint.h>
#include "bpf_net_helpers.h"
#include "bpf_shared.h"

// These values are defined for the cgroup bpf filter programs only.
#define BPF_DROP_UNLESS_DNS 2
#define BPF_PASS 1
#define BPF_DROP 0
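// The cgroup programs in this file only ever return BPF_PASS (1) or BPF_DROP (0);
// BPF_DROP_UNLESS_DNS (2) is internal to this file and is resolved to one of the
// other two values in bpf_traffic_account() before the program returns.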

// These values are used by the xt_bpf programs only.
#define BPF_NOMATCH 0
#define BPF_MATCH 1

#define BPF_EGRESS 0
#define BPF_INGRESS 1

#define IP_PROTO_OFF offsetof(struct iphdr, protocol)
#define IPV6_PROTO_OFF offsetof(struct ipv6hdr, nexthdr)
#define IPPROTO_IHL_OFF 0
#define TCP_FLAG_OFF 13
#define RST_OFFSET 2
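// A sketch of what the raw offsets above point at: TCP_FLAG_OFF (13) is the byte
// of the TCP header holding the flag bits (CWR|ECE|URG|ACK|PSH|RST|SYN|FIN), so
// the RST flag sits at bit RST_OFFSET (2), i.e. mask 0x04. IPPROTO_IHL_OFF (0) is
// the first byte of the IPv4 header, whose low nibble is the IHL field (header
// length in 32-bit words).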

// For maps that netd does not need to access
#define DEFINE_BPF_MAP_NO_NETD(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries) \
    DEFINE_BPF_MAP_UGM(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, AID_ROOT, AID_NET_BW_ACCT, 0060)

// For maps that netd only needs read-only access to
#define DEFINE_BPF_MAP_RO_NETD(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries) \
    DEFINE_BPF_MAP_UGM(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, AID_ROOT, AID_NET_BW_ACCT, 0460)

// For maps that netd needs to be able to read and write
#define DEFINE_BPF_MAP_RW_NETD(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries) \
    DEFINE_BPF_MAP_UGM(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, AID_ROOT, AID_NET_BW_ACCT, 0660)
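// The trailing uid/gid/mode arguments to DEFINE_BPF_MAP_UGM are unix-style
// ownership and octal mode bits for the pinned map node, so e.g. 0460 above
// should mean owner (AID_ROOT) read-only plus group (AID_NET_BW_ACCT) read/write.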

DEFINE_BPF_MAP_RW_NETD(cookie_tag_map, HASH, uint64_t, UidTagValue, COOKIE_UID_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(uid_counterset_map, HASH, uint32_t, uint8_t, UID_COUNTERSET_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(app_uid_stats_map, HASH, uint32_t, StatsValue, APP_STATS_MAP_SIZE)
DEFINE_BPF_MAP_RW_NETD(stats_map_A, HASH, StatsKey, StatsValue, STATS_MAP_SIZE)
DEFINE_BPF_MAP_RO_NETD(stats_map_B, HASH, StatsKey, StatsValue, STATS_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(iface_stats_map, HASH, uint32_t, StatsValue, IFACE_STATS_MAP_SIZE)
DEFINE_BPF_MAP_RW_NETD(configuration_map, HASH, uint32_t, uint32_t, CONFIGURATION_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(uid_owner_map, HASH, uint32_t, UidOwnerValue, UID_OWNER_MAP_SIZE)
DEFINE_BPF_MAP_RW_NETD(uid_permission_map, HASH, uint32_t, uint8_t, UID_OWNER_MAP_SIZE)

/* never actually used from ebpf */
DEFINE_BPF_MAP_NO_NETD(iface_index_name_map, HASH, uint32_t, IfaceValue, IFACE_INDEX_NAME_MAP_SIZE)

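// MIN_SYSTEM_UID and MAX_SYSTEM_UID come from netdutils/UidConstants.h; the range
// is assumed here to cover the platform-reserved uids below AID_APP_START
// (typically uids under 10000), so everything above it is treated as an app.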
static __always_inline int is_system_uid(uint32_t uid) {
    return (uid <= MAX_SYSTEM_UID) && (uid >= MIN_SYSTEM_UID);
}

/*
 * Note: this blindly assumes an MTU of 1500, and that packets > MTU are always TCP,
 * and that TCP is using the Linux default settings with TCP timestamp option enabled
 * which uses 12 TCP option bytes per frame.
 *
 * These are not unreasonable assumptions:
 *
 * The internet does not really support MTUs greater than 1500, so most TCP traffic will
 * be at that MTU, or slightly below it (worst case: our upwards adjustment is too small).
 *
 * The chance our traffic isn't IP at all is basically zero, so the IP overhead correction
 * is bound to be needed.
 *
 * Furthermore, the likelihood that we're having to deal with GSO (ie. > MTU) packets that
 * are not IP/TCP is pretty small (few other things are supported by Linux) and worst case
 * our extra overhead will be slightly off, but probably still better than assuming none.
 *
 * Most servers are also Linux and thus support/default to using TCP timestamp option
 * (and indeed the TCP timestamp option comes from RFC 1323 titled "TCP Extensions for High
 * Performance", which also defined TCP window scaling, so both are absolutely ancient...).
 *
 * All together this should be more correct than if we simply ignored GSO frames
 * (ie. counted them as single packets with no extra overhead)
 *
 * Especially since the number of packets is important for any future clat offload correction.
 * (which adjusts upward by 20 bytes per packet to account for ipv4 -> ipv6 header conversion)
 */
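/*
 * Worked example of the adjustment below (a sketch with made-up numbers): a
 * 10220-byte IPv6 GSO skb has tcp_overhead = 40 (ipv6hdr) + 20 (tcphdr) + 12
 * (timestamps) = 72, so mss = 1500 - 72 = 1428 and payload = 10220 - 72 = 10148.
 * That yields packets = ceil(10148 / 1428) = 8 and bytes = 72 * 8 + 10148 = 10724,
 * i.e. the on-wire cost of eight separate frames rather than one oversized one.
 */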
#define DEFINE_UPDATE_STATS(the_stats_map, TypeOfKey)                                          \
    static __always_inline inline void update_##the_stats_map(struct __sk_buff* skb,           \
                                                              int direction, TypeOfKey* key) { \
        StatsValue* value = bpf_##the_stats_map##_lookup_elem(key);                            \
        if (!value) {                                                                          \
            StatsValue newValue = {};                                                          \
            bpf_##the_stats_map##_update_elem(key, &newValue, BPF_NOEXIST);                    \
            value = bpf_##the_stats_map##_lookup_elem(key);                                    \
        }                                                                                      \
        if (value) {                                                                           \
            const int mtu = 1500;                                                              \
            uint64_t packets = 1;                                                              \
            uint64_t bytes = skb->len;                                                         \
            if (bytes > mtu) {                                                                 \
                bool is_ipv6 = (skb->protocol == htons(ETH_P_IPV6));                           \
                int ip_overhead = (is_ipv6 ? sizeof(struct ipv6hdr) : sizeof(struct iphdr));   \
                int tcp_overhead = ip_overhead + sizeof(struct tcphdr) + 12;                   \
                int mss = mtu - tcp_overhead;                                                  \
                uint64_t payload = bytes - tcp_overhead;                                       \
                packets = (payload + mss - 1) / mss;                                           \
                bytes = tcp_overhead * packets + payload;                                      \
            }                                                                                  \
            if (direction == BPF_EGRESS) {                                                     \
                __sync_fetch_and_add(&value->txPackets, packets);                              \
                __sync_fetch_and_add(&value->txBytes, bytes);                                  \
            } else if (direction == BPF_INGRESS) {                                             \
                __sync_fetch_and_add(&value->rxPackets, packets);                              \
                __sync_fetch_and_add(&value->rxBytes, bytes);                                  \
            }                                                                                  \
        }                                                                                      \
    }

DEFINE_UPDATE_STATS(app_uid_stats_map, uint32_t)
DEFINE_UPDATE_STATS(iface_stats_map, uint32_t)
DEFINE_UPDATE_STATS(stats_map_A, StatsKey)
DEFINE_UPDATE_STATS(stats_map_B, StatsKey)

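// A reading of the checks below (not an upstream-documented contract): owner
// matching is skipped for ESP (IPsec) traffic and for TCP segments with the RST
// flag set, since neither reliably maps to the uid that ultimately owns them.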
static inline bool skip_owner_match(struct __sk_buff* skb) {
    int offset = -1;
    int ret = 0;
    if (skb->protocol == htons(ETH_P_IP)) {
        offset = IP_PROTO_OFF;
        uint8_t proto, ihl;
        uint8_t flag;
        ret = bpf_skb_load_bytes(skb, offset, &proto, 1);
        if (!ret) {
            if (proto == IPPROTO_ESP) {
                return true;
            } else if (proto == IPPROTO_TCP) {
                ret = bpf_skb_load_bytes(skb, IPPROTO_IHL_OFF, &ihl, 1);
                ihl = ihl & 0x0F;
                ret = bpf_skb_load_bytes(skb, ihl * 4 + TCP_FLAG_OFF, &flag, 1);
                if (ret == 0 && (flag >> RST_OFFSET & 1)) {
                    return true;
                }
            }
        }
    } else if (skb->protocol == htons(ETH_P_IPV6)) {
        offset = IPV6_PROTO_OFF;
        uint8_t proto;
        ret = bpf_skb_load_bytes(skb, offset, &proto, 1);
        if (!ret) {
            if (proto == IPPROTO_ESP) {
                return true;
            } else if (proto == IPPROTO_TCP) {
                uint8_t flag;
                ret = bpf_skb_load_bytes(skb, sizeof(struct ipv6hdr) + TCP_FLAG_OFF, &flag, 1);
                if (ret == 0 && (flag >> RST_OFFSET & 1)) {
                    return true;
                }
            }
        }
    }
    return false;
}

static __always_inline BpfConfig getConfig(uint32_t configKey) {
    uint32_t mapSettingKey = configKey;
    BpfConfig* config = bpf_configuration_map_lookup_elem(&mapSettingKey);
    if (!config) {
        // Couldn't read configuration entry. Assume everything is disabled.
        return DEFAULT_CONFIG;
    }
    return *config;
}

static inline int bpf_owner_match(struct __sk_buff* skb, uint32_t uid, int direction) {
    if (skip_owner_match(skb)) return BPF_PASS;

    if (is_system_uid(uid)) return BPF_PASS;

    BpfConfig enabledRules = getConfig(UID_RULES_CONFIGURATION_KEY);

    UidOwnerValue* uidEntry = bpf_uid_owner_map_lookup_elem(&uid);
    uint32_t uidRules = uidEntry ? uidEntry->rule : 0;
    uint32_t allowed_iif = uidEntry ? uidEntry->iif : 0;

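    // How the checks below pair up (a reading of the code, not upstream docs):
    // DOZABLE, POWERSAVE, RESTRICTED and LOW_POWER_STANDBY behave as allowlist
    // chains -- when globally enabled, a uid is dropped unless its rules carry the
    // matching bit -- while STANDBY and the OEM_DENY chains behave as denylists,
    // dropping a uid that carries the bit.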
    if (enabledRules) {
        if ((enabledRules & DOZABLE_MATCH) && !(uidRules & DOZABLE_MATCH)) {
            return BPF_DROP;
        }
        if ((enabledRules & STANDBY_MATCH) && (uidRules & STANDBY_MATCH)) {
            return BPF_DROP;
        }
        if ((enabledRules & POWERSAVE_MATCH) && !(uidRules & POWERSAVE_MATCH)) {
            return BPF_DROP;
        }
        if ((enabledRules & RESTRICTED_MATCH) && !(uidRules & RESTRICTED_MATCH)) {
            return BPF_DROP;
        }
        if ((enabledRules & LOW_POWER_STANDBY_MATCH) && !(uidRules & LOW_POWER_STANDBY_MATCH)) {
            return BPF_DROP;
        }
        if ((enabledRules & OEM_DENY_1_MATCH) && (uidRules & OEM_DENY_1_MATCH)) {
            return BPF_DROP;
        }
        if ((enabledRules & OEM_DENY_2_MATCH) && (uidRules & OEM_DENY_2_MATCH)) {
            return BPF_DROP;
        }
        if ((enabledRules & OEM_DENY_3_MATCH) && (uidRules & OEM_DENY_3_MATCH)) {
            return BPF_DROP;
        }
    }
    if (direction == BPF_INGRESS && skb->ifindex != 1) {
        if (uidRules & IIF_MATCH) {
            if (allowed_iif && skb->ifindex != allowed_iif) {
                // Drops packets coming neither from lo nor from the allowed interface.
                // allowed_iif == 0 is a wildcard and does not drop packets.
                return BPF_DROP_UNLESS_DNS;
            }
        } else if (uidRules & LOCKDOWN_VPN_MATCH) {
            // Drops packets not coming from lo when the rule has LOCKDOWN_VPN_MATCH
            // but no IIF_MATCH.
            return BPF_DROP_UNLESS_DNS;
        }
    }
    return BPF_PASS;
}

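// A note on the A/B maps (inferred from the switch below, not upstream docs):
// configuration_map selects which of stats_map_A/stats_map_B is currently live,
// presumably so userspace can drain and clear the inactive map without racing
// against this program.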
static __always_inline inline void update_stats_with_config(struct __sk_buff* skb, int direction,
                                                            StatsKey* key, uint32_t selectedMap) {
    if (selectedMap == SELECT_MAP_A) {
        update_stats_map_A(skb, direction, key);
    } else if (selectedMap == SELECT_MAP_B) {
        update_stats_map_B(skb, direction, key);
    }
}

static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb, int direction) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    uint64_t cookie = bpf_get_socket_cookie(skb);
    UidTagValue* utag = bpf_cookie_tag_map_lookup_elem(&cookie);
    uint32_t uid, tag;
    if (utag) {
        uid = utag->uid;
        tag = utag->tag;
    } else {
        uid = sock_uid;
        tag = 0;
    }

    // Always allow and never count clat traffic. Only the IPv4 traffic on the stacked
    // interface is accounted for and subject to usage restrictions.
    // TODO: remove sock_uid check once Nat464Xlat javaland adds the socket tag AID_CLAT for clat.
    if (sock_uid == AID_CLAT || uid == AID_CLAT) {
        return BPF_PASS;
    }

    int match = bpf_owner_match(skb, sock_uid, direction);
    if ((direction == BPF_EGRESS) && (match == BPF_DROP)) {
        // If an outbound packet is going to be dropped, we do not count that
        // traffic.
        return match;
    }

// Workaround for secureVPN with VpnIsolation enabled, refer to b/159994981 for details.
// Keep TAG_SYSTEM_DNS in sync with DnsResolver/include/netd_resolv/resolv.h
// and TrafficStatsConstants.java
#define TAG_SYSTEM_DNS 0xFFFFFF82
    if (tag == TAG_SYSTEM_DNS && uid == AID_DNS) {
        uid = sock_uid;
        if (match == BPF_DROP_UNLESS_DNS) match = BPF_PASS;
    } else {
        if (match == BPF_DROP_UNLESS_DNS) match = BPF_DROP;
    }

    StatsKey key = {.uid = uid, .tag = tag, .counterSet = 0, .ifaceIndex = skb->ifindex};

    uint8_t* counterSet = bpf_uid_counterset_map_lookup_elem(&uid);
    if (counterSet) key.counterSet = (uint32_t)*counterSet;

    uint32_t mapSettingKey = CURRENT_STATS_MAP_CONFIGURATION_KEY;
    uint32_t* selectedMap = bpf_configuration_map_lookup_elem(&mapSettingKey);

    // Use asm("%0 &= 1" : "+r"(match)) before returning match to help the
    // kernel's bpf verifier, so that it can be 100% certain that the returned
    // value is always BPF_DROP (0) or BPF_PASS (1).
    if (!selectedMap) {
        asm("%0 &= 1" : "+r"(match));
        return match;
    }

    if (key.tag) {
        update_stats_with_config(skb, direction, &key, *selectedMap);
        key.tag = 0;
    }

    update_stats_with_config(skb, direction, &key, *selectedMap);
    update_app_uid_stats_map(skb, direction, &uid);
    asm("%0 &= 1" : "+r"(match));
    return match;
}

DEFINE_BPF_PROG("cgroupskb/ingress/stats", AID_ROOT, AID_SYSTEM, bpf_cgroup_ingress)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, BPF_INGRESS);
}

DEFINE_BPF_PROG("cgroupskb/egress/stats", AID_ROOT, AID_SYSTEM, bpf_cgroup_egress)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, BPF_EGRESS);
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_BPF_PROG("skfilter/egress/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_egress_prog)
(struct __sk_buff* skb) {
    // Clat daemon does not generate new traffic, all its traffic is accounted for already
    // on the v4-* interfaces (except for the 20 (or 28) extra bytes of IPv6 vs IPv4 overhead,
    // but that can be corrected for later when merging v4-foo stats into interface foo's).
    // TODO: remove sock_uid check once Nat464Xlat javaland adds the socket tag AID_CLAT for clat.
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    if (sock_uid == AID_CLAT) return BPF_NOMATCH;
    if (sock_uid == AID_SYSTEM) {
        uint64_t cookie = bpf_get_socket_cookie(skb);
        UidTagValue* utag = bpf_cookie_tag_map_lookup_elem(&cookie);
        if (utag && utag->uid == AID_CLAT) return BPF_NOMATCH;
    }

    uint32_t key = skb->ifindex;
    update_iface_stats_map(skb, BPF_EGRESS, &key);
    return BPF_MATCH;
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_BPF_PROG("skfilter/ingress/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_ingress_prog)
(struct __sk_buff* skb) {
    // Clat daemon traffic is not accounted by virtue of iptables raw prerouting drop rule
    // (in clat_raw_PREROUTING chain), which triggers before this (in bw_raw_PREROUTING chain).
    // It will be accounted for on the v4-* clat interface instead.
    // Keep that in mind when moving this out of iptables xt_bpf and into tc ingress (or xdp).

    uint32_t key = skb->ifindex;
    update_iface_stats_map(skb, BPF_INGRESS, &key);
    return BPF_MATCH;
}

DEFINE_BPF_PROG("schedact/ingress/account", AID_ROOT, AID_NET_ADMIN, tc_bpf_ingress_account_prog)
(struct __sk_buff* skb) {
    if (is_received_skb(skb)) {
        // Account for ingress traffic before tc drops it.
        uint32_t key = skb->ifindex;
        update_iface_stats_map(skb, BPF_INGRESS, &key);
    }
    return TC_ACT_UNSPEC;
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_BPF_PROG("skfilter/allowlist/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_allowlist_prog)
(struct __sk_buff* skb) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    if (is_system_uid(sock_uid)) return BPF_MATCH;

    // 65534 is the overflow 'nobody' uid; getting it back usually means that
    // skb->sk is NULL during RX (early decap socket lookup failure), which
    // commonly happens for incoming packets to an unconnected udp socket.
    // Additionally bpf_get_socket_cookie() returns 0 if skb->sk is NULL.
    if ((sock_uid == 65534) && !bpf_get_socket_cookie(skb) && is_received_skb(skb))
        return BPF_MATCH;

    UidOwnerValue* allowlistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
    if (allowlistMatch) return allowlistMatch->rule & HAPPY_BOX_MATCH ? BPF_MATCH : BPF_NOMATCH;
    return BPF_NOMATCH;
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_BPF_PROG("skfilter/denylist/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_denylist_prog)
(struct __sk_buff* skb) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    UidOwnerValue* denylistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
    if (denylistMatch) return denylistMatch->rule & PENALTY_BOX_MATCH ? BPF_MATCH : BPF_NOMATCH;
    return BPF_NOMATCH;
}

DEFINE_BPF_PROG_KVER("cgroupsock/inet/create", AID_ROOT, AID_ROOT, inet_socket_create,
                     KVER(4, 14, 0))
(struct bpf_sock* sk) {
    uint64_t gid_uid = bpf_get_current_uid_gid();
    /*
     * A given app is guaranteed to have the same app ID in all the profiles in
     * which it is installed, and install permission is granted to the app for
     * all users at install time, so we only check the appId part of a request
     * uid at run time. See UserHandle#isSameApp for details.
     */
    uint32_t appId = (gid_uid & 0xffffffff) % PER_USER_RANGE;
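    // Worked example, assuming PER_USER_RANGE is 100000 as in UserHandle:
    // uid 1010123 (user 10, app 10123) reduces to appId 10123, the same value
    // the app has as uid 10123 under user 0.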
    uint8_t* permissions = bpf_uid_permission_map_lookup_elem(&appId);
    if (!permissions) {
        // UID not in map. Default to just INTERNET permission.
        return 1;
    }

    // A return value of 1 means allow, everything else means deny.
    return (*permissions & BPF_PERMISSION_INTERNET) == BPF_PERMISSION_INTERNET;
}

LICENSE("Apache 2.0");
CRITICAL("netd");