/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <bpf_helpers.h>
#include <linux/bpf.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>
#include <stdbool.h>
#include <stdint.h>
#include "bpf_net_helpers.h"
#include "bpf_shared.h"

// These values are used by the cgroup bpf filters only.
#define BPF_DROP_UNLESS_DNS 2
#define BPF_PASS 1
#define BPF_DROP 0

// These values are used by the xt_bpf programs only.
#define BPF_NOMATCH 0
#define BPF_MATCH 1

#define BPF_EGRESS 0
#define BPF_INGRESS 1

#define IP_PROTO_OFF offsetof(struct iphdr, protocol)
#define IPV6_PROTO_OFF offsetof(struct ipv6hdr, nexthdr)
#define IPPROTO_IHL_OFF 0
#define TCP_FLAG_OFF 13
#define RST_OFFSET 2

DEFINE_BPF_MAP_GRW(cookie_tag_map, HASH, uint64_t, UidTagValue, COOKIE_UID_MAP_SIZE,
                   AID_NET_BW_ACCT)
DEFINE_BPF_MAP_GRW(uid_counterset_map, HASH, uint32_t, uint8_t, UID_COUNTERSET_MAP_SIZE,
                   AID_NET_BW_ACCT)
DEFINE_BPF_MAP_GRW(app_uid_stats_map, HASH, uint32_t, StatsValue, APP_STATS_MAP_SIZE,
                   AID_NET_BW_ACCT)
DEFINE_BPF_MAP_GRW(stats_map_A, HASH, StatsKey, StatsValue, STATS_MAP_SIZE, AID_NET_BW_ACCT)
DEFINE_BPF_MAP_GRW(stats_map_B, HASH, StatsKey, StatsValue, STATS_MAP_SIZE, AID_NET_BW_ACCT)
DEFINE_BPF_MAP_GRW(iface_stats_map, HASH, uint32_t, StatsValue, IFACE_STATS_MAP_SIZE,
                   AID_NET_BW_ACCT)
DEFINE_BPF_MAP_GRW(configuration_map, HASH, uint32_t, uint8_t, CONFIGURATION_MAP_SIZE,
                   AID_NET_BW_ACCT)
DEFINE_BPF_MAP_GRW(uid_owner_map, HASH, uint32_t, UidOwnerValue, UID_OWNER_MAP_SIZE,
                   AID_NET_BW_ACCT)
DEFINE_BPF_MAP_GRW(uid_permission_map, HASH, uint32_t, uint8_t, UID_OWNER_MAP_SIZE, AID_NET_BW_ACCT)

/* never actually used from ebpf */
DEFINE_BPF_MAP_GRW(iface_index_name_map, HASH, uint32_t, IfaceValue, IFACE_INDEX_NAME_MAP_SIZE,
                   AID_NET_BW_ACCT)
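
// Note on stats_map_A / stats_map_B: only one of the two is live at any given time.
// bpf_traffic_account() below reads CURRENT_STATS_MAP_CONFIGURATION_KEY from configuration_map
// to decide which one to update; userspace flips that selector, presumably so it can read and
// clear the inactive map without racing against in-flight updates.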

static __always_inline int is_system_uid(uint32_t uid) {
    return (uid <= MAX_SYSTEM_UID) && (uid >= MIN_SYSTEM_UID);
}

/*
 * Note: this blindly assumes an MTU of 1500, and that packets > MTU are always TCP,
 * and that TCP is using the Linux default settings with TCP timestamp option enabled,
 * which uses 12 TCP option bytes per frame.
 *
 * These are not unreasonable assumptions:
 *
 * The internet does not really support MTUs greater than 1500, so most TCP traffic will
 * be at that MTU, or slightly below it (worst case our upwards adjustment is too small).
 *
 * The chance our traffic isn't IP at all is basically zero, so the IP overhead correction
 * is bound to be needed.
 *
 * Furthermore, the likelihood that we're having to deal with GSO (ie. > MTU) packets that
 * are not IP/TCP is pretty small (few other things are supported by Linux), and worst case
 * our extra overhead will be slightly off, but probably still better than assuming none.
 *
 * Most servers are also Linux and thus support/default to using the TCP timestamp option
 * (and indeed the TCP timestamp option comes from RFC 1323, titled "TCP Extensions for High
 * Performance", which also defined TCP window scaling and is thus absolutely ancient...).
 *
 * All together this should be more correct than if we simply ignored GSO frames
 * (ie. counted them as single packets with no extra overhead).
 *
 * Especially since the number of packets is important for any future clat offload correction
 * (which adjusts upward by 20 bytes per packet to account for ipv4 -> ipv6 header conversion).
 */
#define DEFINE_UPDATE_STATS(the_stats_map, TypeOfKey)                                            \
    static __always_inline inline void update_##the_stats_map(struct __sk_buff* skb,            \
                                                               int direction, TypeOfKey* key) {  \
        StatsValue* value = bpf_##the_stats_map##_lookup_elem(key);                              \
        if (!value) {                                                                            \
            StatsValue newValue = {};                                                            \
            bpf_##the_stats_map##_update_elem(key, &newValue, BPF_NOEXIST);                      \
            value = bpf_##the_stats_map##_lookup_elem(key);                                      \
        }                                                                                        \
        if (value) {                                                                             \
            const int mtu = 1500;                                                                \
            uint64_t packets = 1;                                                                \
            uint64_t bytes = skb->len;                                                           \
            if (bytes > mtu) {                                                                   \
                bool is_ipv6 = (skb->protocol == htons(ETH_P_IPV6));                            \
                int ip_overhead = (is_ipv6 ? sizeof(struct ipv6hdr) : sizeof(struct iphdr));     \
                int tcp_overhead = ip_overhead + sizeof(struct tcphdr) + 12;                     \
                int mss = mtu - tcp_overhead;                                                    \
                uint64_t payload = bytes - tcp_overhead;                                         \
                packets = (payload + mss - 1) / mss;                                             \
                bytes = tcp_overhead * packets + payload;                                        \
            }                                                                                    \
            if (direction == BPF_EGRESS) {                                                       \
                __sync_fetch_and_add(&value->txPackets, packets);                                \
                __sync_fetch_and_add(&value->txBytes, bytes);                                    \
            } else if (direction == BPF_INGRESS) {                                               \
                __sync_fetch_and_add(&value->rxPackets, packets);                                \
                __sync_fetch_and_add(&value->rxBytes, bytes);                                    \
            }                                                                                    \
        }                                                                                        \
    }
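
// Worked example of the estimate above (illustrative numbers only): a 65000-byte IPv4 GSO
// skb gives tcp_overhead = 20 + 20 + 12 = 52, mss = 1448, payload = 64948,
// packets = ceil(64948 / 1448) = 45 and bytes = 52 * 45 + 64948 = 67288, i.e. the frame is
// counted as 45 on-the-wire packets with the per-packet header overhead added back in.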

DEFINE_UPDATE_STATS(app_uid_stats_map, uint32_t)
DEFINE_UPDATE_STATS(iface_stats_map, uint32_t)
DEFINE_UPDATE_STATS(stats_map_A, StatsKey)
DEFINE_UPDATE_STATS(stats_map_B, StatsKey)

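// Returns true when the per-uid owner firewall below should not be applied to this packet:
// ESP (IPsec) traffic is always let through, and so are TCP segments carrying the RST flag,
// presumably so that connections blocked mid-stream can still be torn down cleanly.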
static inline bool skip_owner_match(struct __sk_buff* skb) {
    int offset = -1;
    int ret = 0;
    if (skb->protocol == htons(ETH_P_IP)) {
        offset = IP_PROTO_OFF;
        uint8_t proto, ihl;
        uint8_t flag;
        ret = bpf_skb_load_bytes(skb, offset, &proto, 1);
        if (!ret) {
            if (proto == IPPROTO_ESP) {
                return true;
            } else if (proto == IPPROTO_TCP) {
                ret = bpf_skb_load_bytes(skb, IPPROTO_IHL_OFF, &ihl, 1);
                ihl = ihl & 0x0F;
                ret = bpf_skb_load_bytes(skb, ihl * 4 + TCP_FLAG_OFF, &flag, 1);
                if (ret == 0 && (flag >> RST_OFFSET & 1)) {
                    return true;
                }
            }
        }
    } else if (skb->protocol == htons(ETH_P_IPV6)) {
        offset = IPV6_PROTO_OFF;
        uint8_t proto;
        ret = bpf_skb_load_bytes(skb, offset, &proto, 1);
        if (!ret) {
            if (proto == IPPROTO_ESP) {
                return true;
            } else if (proto == IPPROTO_TCP) {
                uint8_t flag;
                ret = bpf_skb_load_bytes(skb, sizeof(struct ipv6hdr) + TCP_FLAG_OFF, &flag, 1);
                if (ret == 0 && (flag >> RST_OFFSET & 1)) {
                    return true;
                }
            }
        }
    }
    return false;
}

static __always_inline BpfConfig getConfig(uint32_t configKey) {
    uint32_t mapSettingKey = configKey;
    BpfConfig* config = bpf_configuration_map_lookup_elem(&mapSettingKey);
    if (!config) {
        // Couldn't read configuration entry. Assume everything is disabled.
        return DEFAULT_CONFIG;
    }
    return *config;
}

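// Core of the per-uid firewall: checks the uid's rules from uid_owner_map against the chains
// currently enabled in configuration_map. Dozable, powersave and restricted act as allowlists
// (a missing bit means drop), while standby acts as a denylist (a set bit means drop).
// Returns BPF_PASS, BPF_DROP, or BPF_DROP_UNLESS_DNS for ingress packets that fail the VPN
// allowed-interface restriction.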
static inline int bpf_owner_match(struct __sk_buff* skb, uint32_t uid, int direction) {
    if (skip_owner_match(skb)) return BPF_PASS;

    if (is_system_uid(uid)) return BPF_PASS;

    BpfConfig enabledRules = getConfig(UID_RULES_CONFIGURATION_KEY);

    UidOwnerValue* uidEntry = bpf_uid_owner_map_lookup_elem(&uid);
    uint8_t uidRules = uidEntry ? uidEntry->rule : 0;
    uint32_t allowed_iif = uidEntry ? uidEntry->iif : 0;

    if (enabledRules) {
        if ((enabledRules & DOZABLE_MATCH) && !(uidRules & DOZABLE_MATCH)) {
            return BPF_DROP;
        }
        if ((enabledRules & STANDBY_MATCH) && (uidRules & STANDBY_MATCH)) {
            return BPF_DROP;
        }
        if ((enabledRules & POWERSAVE_MATCH) && !(uidRules & POWERSAVE_MATCH)) {
            return BPF_DROP;
        }
        if ((enabledRules & RESTRICTED_MATCH) && !(uidRules & RESTRICTED_MATCH)) {
            return BPF_DROP;
        }
    }
    if (direction == BPF_INGRESS && (uidRules & IIF_MATCH)) {
        // Drop packets that arrive neither on lo (ifindex 1) nor on the allowlisted interface.
        if (allowed_iif && skb->ifindex != 1 && skb->ifindex != allowed_iif) {
            return BPF_DROP_UNLESS_DNS;
        }
    }
    return BPF_PASS;
}

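// Update whichever of stats_map_A / stats_map_B is currently selected by userspace
// (see the note next to the map definitions above).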
static __always_inline inline void update_stats_with_config(struct __sk_buff* skb, int direction,
                                                            StatsKey* key, uint8_t selectedMap) {
    if (selectedMap == SELECT_MAP_A) {
        update_stats_map_A(skb, direction, key);
    } else if (selectedMap == SELECT_MAP_B) {
        update_stats_map_B(skb, direction, key);
    }
}

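// Shared body of the cgroup ingress/egress programs: resolves the packet's uid/tag from the
// socket cookie, exempts clat traffic, applies the owner firewall, and (unless an egress
// packet is being dropped) accounts the packet into the per-uid+tag, per-uid and active
// stats maps. The return value is the firewall verdict, clamped to 0/1 for the verifier.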
static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb, int direction) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    uint64_t cookie = bpf_get_socket_cookie(skb);
    UidTagValue* utag = bpf_cookie_tag_map_lookup_elem(&cookie);
    uint32_t uid, tag;
    if (utag) {
        uid = utag->uid;
        tag = utag->tag;
    } else {
        uid = sock_uid;
        tag = 0;
    }

    // Always allow and never count clat traffic. Only the IPv4 traffic on the stacked
    // interface is accounted for and subject to usage restrictions.
    // TODO: remove sock_uid check once Nat464Xlat javaland adds the socket tag AID_CLAT for clat.
    if (sock_uid == AID_CLAT || uid == AID_CLAT) {
        return BPF_PASS;
    }

    int match = bpf_owner_match(skb, sock_uid, direction);
    if ((direction == BPF_EGRESS) && (match == BPF_DROP)) {
        // If an outbound packet is going to be dropped, we do not count that
        // traffic.
        return match;
    }

// Workaround for secureVPN with VpnIsolation enabled, refer to b/159994981 for details.
// Keep TAG_SYSTEM_DNS in sync with DnsResolver/include/netd_resolv/resolv.h
// and TrafficStatsConstants.java
#define TAG_SYSTEM_DNS 0xFFFFFF82
    if (tag == TAG_SYSTEM_DNS && uid == AID_DNS) {
        uid = sock_uid;
        if (match == BPF_DROP_UNLESS_DNS) match = BPF_PASS;
    } else {
        if (match == BPF_DROP_UNLESS_DNS) match = BPF_DROP;
    }

    StatsKey key = {.uid = uid, .tag = tag, .counterSet = 0, .ifaceIndex = skb->ifindex};

    uint8_t* counterSet = bpf_uid_counterset_map_lookup_elem(&uid);
    if (counterSet) key.counterSet = (uint32_t)*counterSet;

    uint32_t mapSettingKey = CURRENT_STATS_MAP_CONFIGURATION_KEY;
    uint8_t* selectedMap = bpf_configuration_map_lookup_elem(&mapSettingKey);

    // Use asm("%0 &= 1" : "+r"(match)) before returning match, to help the kernel's bpf
    // verifier, so that it can be 100% certain that the returned value is always
    // BPF_NOMATCH (0) or BPF_MATCH (1).
    if (!selectedMap) {
        asm("%0 &= 1" : "+r"(match));
        return match;
    }

    if (key.tag) {
        update_stats_with_config(skb, direction, &key, *selectedMap);
        key.tag = 0;
    }

    update_stats_with_config(skb, direction, &key, *selectedMap);
    update_app_uid_stats_map(skb, direction, &uid);
    asm("%0 &= 1" : "+r"(match));
    return match;
}

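// cgroup_skb hooks: these run for every packet sent or received by sockets in the cgroup they
// are attached to, and the returned value is the verdict enforced by the kernel:
// 1 (BPF_PASS) lets the packet through, 0 (BPF_DROP) discards it.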
DEFINE_BPF_PROG("cgroupskb/ingress/stats", AID_ROOT, AID_ROOT, bpf_cgroup_ingress)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, BPF_INGRESS);
}

DEFINE_BPF_PROG("cgroupskb/egress/stats", AID_ROOT, AID_ROOT, bpf_cgroup_egress)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, BPF_EGRESS);
}

DEFINE_BPF_PROG("skfilter/egress/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_egress_prog)
(struct __sk_buff* skb) {
    // The clat daemon does not generate new traffic; all of its traffic is already accounted
    // for on the v4-* interfaces (except for the 20 (or 28) extra bytes of IPv6 vs IPv4
    // overhead, but that can be corrected for later when merging v4-foo stats into
    // interface foo's).
    // TODO: remove sock_uid check once Nat464Xlat javaland adds the socket tag AID_CLAT for clat.
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    if (sock_uid == AID_CLAT) return BPF_NOMATCH;
    if (sock_uid == AID_SYSTEM) {
        uint64_t cookie = bpf_get_socket_cookie(skb);
        UidTagValue* utag = bpf_cookie_tag_map_lookup_elem(&cookie);
        if (utag && utag->uid == AID_CLAT) return BPF_NOMATCH;
    }

    uint32_t key = skb->ifindex;
    update_iface_stats_map(skb, BPF_EGRESS, &key);
    return BPF_MATCH;
}

DEFINE_BPF_PROG("skfilter/ingress/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_ingress_prog)
(struct __sk_buff* skb) {
    // Clat daemon traffic is not accounted here by virtue of the iptables raw prerouting drop
    // rule (in the clat_raw_PREROUTING chain), which triggers before this (in the
    // bw_raw_PREROUTING chain). It will be accounted for on the v4-* clat interface instead.
    // Keep that in mind when moving this out of iptables xt_bpf and into tc ingress (or xdp).

    uint32_t key = skb->ifindex;
    update_iface_stats_map(skb, BPF_INGRESS, &key);
    return BPF_MATCH;
}

DEFINE_BPF_PROG("schedact/ingress/account", AID_ROOT, AID_NET_ADMIN, tc_bpf_ingress_account_prog)
(struct __sk_buff* skb) {
    // Account for ingress traffic before tc drops it.
    uint32_t key = skb->ifindex;
    update_iface_stats_map(skb, BPF_INGRESS, &key);
    return TC_ACT_UNSPEC;
}

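// Backing program for the iptables allowlist ("happy box") chain: returns BPF_MATCH for uids
// that may use the network while the allowlist is active, i.e. system uids, uids flagged with
// HAPPY_BOX_MATCH, and received packets whose owning socket cannot be identified.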
DEFINE_BPF_PROG("skfilter/allowlist/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_allowlist_prog)
(struct __sk_buff* skb) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    if (is_system_uid(sock_uid)) return BPF_MATCH;

    // 65534 is the overflow 'nobody' uid; getting it back usually means that skb->sk is NULL
    // during RX (early decap socket lookup failure), which commonly happens for incoming
    // packets to an unconnected udp socket.
    // Additionally bpf_get_socket_cookie() returns 0 if skb->sk is NULL.
    if ((sock_uid == 65534) && !bpf_get_socket_cookie(skb) && is_received_skb(skb))
        return BPF_MATCH;

    UidOwnerValue* allowlistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
    if (allowlistMatch) return allowlistMatch->rule & HAPPY_BOX_MATCH ? BPF_MATCH : BPF_NOMATCH;
    return BPF_NOMATCH;
}

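// Backing program for the iptables denylist ("penalty box") chain: returns BPF_MATCH for uids
// flagged with PENALTY_BOX_MATCH so that the chain can drop their traffic.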
DEFINE_BPF_PROG("skfilter/denylist/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_denylist_prog)
(struct __sk_buff* skb) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    UidOwnerValue* denylistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
    if (denylistMatch) return denylistMatch->rule & PENALTY_BOX_MATCH ? BPF_MATCH : BPF_NOMATCH;
    return BPF_NOMATCH;
}

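// Runs at AF_INET/AF_INET6 socket creation time (only loaded on kernels 4.14+). Socket
// creation is denied unless the app id holds the INTERNET permission recorded in
// uid_permission_map; uids absent from the map default to allowed.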
DEFINE_BPF_PROG_KVER("cgroupsock/inet/create", AID_ROOT, AID_ROOT, inet_socket_create,
                     KVER(4, 14, 0))
(struct bpf_sock* sk) {
    uint64_t gid_uid = bpf_get_current_uid_gid();
    /*
     * A given app is guaranteed to have the same app ID in all the profiles in
     * which it is installed, and the install permission is granted to the app for all
     * users at install time, so we only check the appId part of the requesting uid at
     * run time. See UserHandle#isSameApp for details.
     */
    uint32_t appId = (gid_uid & 0xffffffff) % PER_USER_RANGE;
    uint8_t* permissions = bpf_uid_permission_map_lookup_elem(&appId);
    if (!permissions) {
        // UID not in map. Default to just INTERNET permission.
        return 1;
    }

    // A return value of 1 means allow, everything else means deny.
    return (*permissions & BPF_PERMISSION_INTERNET) == BPF_PERMISSION_INTERNET;
}


LICENSE("Apache 2.0");
CRITICAL("netd");