/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <bpf_helpers.h>
#include <linux/bpf.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <stdbool.h>
#include <stdint.h>
#include "bpf_net_helpers.h"
#include "netdbpf/bpf_shared.h"

// These return codes are used by the cgroup bpf filter programs only.
#define BPF_DROP_UNLESS_DNS 2
#define BPF_PASS 1
#define BPF_DROP 0

// These return codes are used by the xt_bpf programs only.
#define BPF_NOMATCH 0
#define BPF_MATCH 1

#define BPF_EGRESS 0
#define BPF_INGRESS 1

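// Header-parsing offsets used by skip_owner_match() below: byte 0 of the IPv4 header holds
// the version/IHL nibbles, byte 13 of the TCP header holds the flag bits, and RST is bit 2
// of that byte (FIN = 0, SYN = 1, RST = 2).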
#define IP_PROTO_OFF offsetof(struct iphdr, protocol)
#define IPV6_PROTO_OFF offsetof(struct ipv6hdr, nexthdr)
#define IPPROTO_IHL_OFF 0
#define TCP_FLAG_OFF 13
#define RST_OFFSET 2

DEFINE_BPF_MAP_GRO(cookie_tag_map, HASH, uint64_t, UidTagValue, COOKIE_UID_MAP_SIZE,
                   AID_NET_BW_ACCT)
DEFINE_BPF_MAP_GRO(uid_counterset_map, HASH, uint32_t, uint8_t, UID_COUNTERSET_MAP_SIZE,
                   AID_NET_BW_ACCT)
DEFINE_BPF_MAP_GRO(app_uid_stats_map, HASH, uint32_t, StatsValue, APP_STATS_MAP_SIZE,
                   AID_NET_BW_STATS)
DEFINE_BPF_MAP_GRW(stats_map_A, HASH, StatsKey, StatsValue, STATS_MAP_SIZE, AID_NET_BW_STATS)
DEFINE_BPF_MAP_GRW(stats_map_B, HASH, StatsKey, StatsValue, STATS_MAP_SIZE, AID_NET_BW_STATS)
DEFINE_BPF_MAP_GRO(iface_stats_map, HASH, uint32_t, StatsValue, IFACE_STATS_MAP_SIZE,
                   AID_NET_BW_STATS)
DEFINE_BPF_MAP_GRO(configuration_map, HASH, uint32_t, uint8_t, CONFIGURATION_MAP_SIZE,
                   AID_NET_BW_STATS)
DEFINE_BPF_MAP(uid_owner_map, HASH, uint32_t, UidOwnerValue, UID_OWNER_MAP_SIZE)

/* never actually used from ebpf */
DEFINE_BPF_MAP_GRO(iface_index_name_map, HASH, uint32_t, IfaceValue, IFACE_INDEX_NAME_MAP_SIZE,
                   AID_NET_BW_STATS)

static __always_inline int is_system_uid(uint32_t uid) {
    return (uid <= MAX_SYSTEM_UID) && (uid >= MIN_SYSTEM_UID);
}

/*
 * Note: this blindly assumes an MTU of 1500, and that packets > MTU are always TCP,
 * and that TCP is using the Linux default settings with TCP timestamp option enabled
 * which uses 12 TCP option bytes per frame.
 *
 * These are not unreasonable assumptions:
 *
 * The internet does not really support MTUs greater than 1500, so most TCP traffic will
 * be at that MTU, or slightly below it (worst case our upwards adjustment is too small).
 *
 * The chance our traffic isn't IP at all is basically zero, so the IP overhead correction
 * is bound to be needed.
 *
 * Furthermore, the likelihood that we're having to deal with GSO (ie. > MTU) packets that
 * are not IP/TCP is pretty small (few other things are supported by Linux) and worst case
 * our extra overhead will be slightly off, but probably still better than assuming none.
 *
 * Most servers are also Linux and thus support/default to using TCP timestamp option
 * (and indeed the TCP timestamp option comes from RFC 1323 titled "TCP Extensions for High
 * Performance", which also defined TCP window scaling and is thus absolutely ancient...).
 *
 * Altogether this should be more correct than if we simply ignored GSO frames
 * (ie. counted them as single packets with no extra overhead)
 *
 * Especially since the number of packets is important for any future clat offload correction.
 * (which adjusts upward by 20 bytes per packet to account for ipv4 -> ipv6 header conversion)
 */
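/*
 * A worked example of the adjustment below (illustrative numbers, not from the original
 * source): a 60000-byte IPv6 GSO frame has tcp_overhead = 40 (ipv6hdr) + 20 (tcphdr)
 * + 12 (timestamps) = 72, so mss = 1500 - 72 = 1428 and payload = 60000 - 72 = 59928.
 * That yields packets = ceil(59928 / 1428) = 42 wire frames, and
 * bytes = 72 * 42 + 59928 = 62952, i.e. roughly 3KB of per-frame header overhead that
 * plain skb->len accounting would have missed.
 */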
#define DEFINE_UPDATE_STATS(the_stats_map, TypeOfKey) \
    static __always_inline inline void update_##the_stats_map(struct __sk_buff* skb, \
                                                              int direction, TypeOfKey* key) { \
        StatsValue* value = bpf_##the_stats_map##_lookup_elem(key); \
        if (!value) { \
            StatsValue newValue = {}; \
            bpf_##the_stats_map##_update_elem(key, &newValue, BPF_NOEXIST); \
            value = bpf_##the_stats_map##_lookup_elem(key); \
        } \
        if (value) { \
            const int mtu = 1500; \
            uint64_t packets = 1; \
            uint64_t bytes = skb->len; \
            if (bytes > mtu) { \
                bool is_ipv6 = (skb->protocol == htons(ETH_P_IPV6)); \
                int ip_overhead = (is_ipv6 ? sizeof(struct ipv6hdr) : sizeof(struct iphdr)); \
                int tcp_overhead = ip_overhead + sizeof(struct tcphdr) + 12; \
                int mss = mtu - tcp_overhead; \
                uint64_t payload = bytes - tcp_overhead; \
                packets = (payload + mss - 1) / mss; \
                bytes = tcp_overhead * packets + payload; \
            } \
            if (direction == BPF_EGRESS) { \
                __sync_fetch_and_add(&value->txPackets, packets); \
                __sync_fetch_and_add(&value->txBytes, bytes); \
            } else if (direction == BPF_INGRESS) { \
                __sync_fetch_and_add(&value->rxPackets, packets); \
                __sync_fetch_and_add(&value->rxBytes, bytes); \
            } \
        } \
    }

DEFINE_UPDATE_STATS(app_uid_stats_map, uint32_t)
DEFINE_UPDATE_STATS(iface_stats_map, uint32_t)
DEFINE_UPDATE_STATS(stats_map_A, StatsKey)
DEFINE_UPDATE_STATS(stats_map_B, StatsKey)

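// True for packets that must bypass the per-uid owner check: all ESP (IPsec) traffic, and
// TCP segments with the RST flag set, which the kernel may generate without any owning
// socket (so a uid lookup on them would be meaningless).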
static inline bool skip_owner_match(struct __sk_buff* skb) {
    int offset = -1;
    int ret = 0;
    if (skb->protocol == htons(ETH_P_IP)) {
        offset = IP_PROTO_OFF;
        uint8_t proto, ihl;
        uint8_t flag;
        ret = bpf_skb_load_bytes(skb, offset, &proto, 1);
        if (!ret) {
            if (proto == IPPROTO_ESP) {
                return true;
            } else if (proto == IPPROTO_TCP) {
                ret = bpf_skb_load_bytes(skb, IPPROTO_IHL_OFF, &ihl, 1);
                ihl = ihl & 0x0F;
                ret = bpf_skb_load_bytes(skb, ihl * 4 + TCP_FLAG_OFF, &flag, 1);
                if (ret == 0 && (flag >> RST_OFFSET & 1)) {
                    return true;
                }
            }
        }
    } else if (skb->protocol == htons(ETH_P_IPV6)) {
        offset = IPV6_PROTO_OFF;
        uint8_t proto;
        ret = bpf_skb_load_bytes(skb, offset, &proto, 1);
        if (!ret) {
            if (proto == IPPROTO_ESP) {
                return true;
            } else if (proto == IPPROTO_TCP) {
                uint8_t flag;
                ret = bpf_skb_load_bytes(skb, sizeof(struct ipv6hdr) + TCP_FLAG_OFF, &flag, 1);
                if (ret == 0 && (flag >> RST_OFFSET & 1)) {
                    return true;
                }
            }
        }
    }
    return false;
}

static __always_inline BpfConfig getConfig(uint32_t configKey) {
    uint32_t mapSettingKey = configKey;
    BpfConfig* config = bpf_configuration_map_lookup_elem(&mapSettingKey);
    if (!config) {
        // Couldn't read configuration entry. Assume everything is disabled.
        return DEFAULT_CONFIG;
    }
    return *config;
}

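// bpf_owner_match applies the per-uid firewall chains: DOZABLE_MATCH, POWERSAVE_MATCH and
// RESTRICTED_MATCH behave as allowlists (when the chain is enabled a uid must carry the
// bit to pass), while STANDBY_MATCH behaves as a denylist (carrying the bit means drop).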
static inline int bpf_owner_match(struct __sk_buff* skb, uint32_t uid, int direction) {
    if (skip_owner_match(skb)) return BPF_PASS;

    if (is_system_uid(uid)) return BPF_PASS;

    BpfConfig enabledRules = getConfig(UID_RULES_CONFIGURATION_KEY);

    UidOwnerValue* uidEntry = bpf_uid_owner_map_lookup_elem(&uid);
    uint8_t uidRules = uidEntry ? uidEntry->rule : 0;
    uint32_t allowed_iif = uidEntry ? uidEntry->iif : 0;

    if (enabledRules) {
        if ((enabledRules & DOZABLE_MATCH) && !(uidRules & DOZABLE_MATCH)) {
            return BPF_DROP;
        }
        if ((enabledRules & STANDBY_MATCH) && (uidRules & STANDBY_MATCH)) {
            return BPF_DROP;
        }
        if ((enabledRules & POWERSAVE_MATCH) && !(uidRules & POWERSAVE_MATCH)) {
            return BPF_DROP;
        }
        if ((enabledRules & RESTRICTED_MATCH) && !(uidRules & RESTRICTED_MATCH)) {
            return BPF_DROP;
        }
    }
    if (direction == BPF_INGRESS && (uidRules & IIF_MATCH)) {
        // Drop packets that arrive neither on lo nor on the allowlisted interface.
        if (allowed_iif && skb->ifindex != 1 && skb->ifindex != allowed_iif) {
            return BPF_DROP_UNLESS_DNS;
        }
    }
    return BPF_PASS;
}

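// stats_map_A and stats_map_B are double-buffered: userspace flips
// CURRENT_STATS_MAP_CONFIGURATION_KEY between SELECT_MAP_A and SELECT_MAP_B so that it
// can drain the inactive map without racing against the in-kernel writers here.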
static __always_inline inline void update_stats_with_config(struct __sk_buff* skb, int direction,
                                                            StatsKey* key, uint8_t selectedMap) {
    if (selectedMap == SELECT_MAP_A) {
        update_stats_map_A(skb, direction, key);
    } else if (selectedMap == SELECT_MAP_B) {
        update_stats_map_B(skb, direction, key);
    }
}

static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb, int direction) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    // Always allow and never count clat traffic. Only the IPv4 traffic on the stacked
    // interface is accounted for and subject to usage restrictions.
    if (sock_uid == AID_CLAT) {
        return BPF_PASS;
    }

    int match = bpf_owner_match(skb, sock_uid, direction);
    if ((direction == BPF_EGRESS) && (match == BPF_DROP)) {
        // If an outbound packet is going to be dropped, we do not count that
        // traffic.
        return match;
    }

    uint64_t cookie = bpf_get_socket_cookie(skb);
    UidTagValue* utag = bpf_cookie_tag_map_lookup_elem(&cookie);
    uint32_t uid, tag;
    if (utag) {
        uid = utag->uid;
        tag = utag->tag;
    } else {
        uid = sock_uid;
        tag = 0;
    }

// Workaround for secureVPN with VpnIsolation enabled, refer to b/159994981 for details.
// Keep TAG_SYSTEM_DNS in sync with DnsResolver/include/netd_resolv/resolv.h
// and TrafficStatsConstants.java
#define TAG_SYSTEM_DNS 0xFFFFFF82
    if (tag == TAG_SYSTEM_DNS && uid == AID_DNS) {
        uid = sock_uid;
        if (match == BPF_DROP_UNLESS_DNS) match = BPF_PASS;
    } else {
        if (match == BPF_DROP_UNLESS_DNS) match = BPF_DROP;
    }

    StatsKey key = {.uid = uid, .tag = tag, .counterSet = 0, .ifaceIndex = skb->ifindex};

    uint8_t* counterSet = bpf_uid_counterset_map_lookup_elem(&uid);
    if (counterSet) key.counterSet = (uint32_t)*counterSet;

    uint32_t mapSettingKey = CURRENT_STATS_MAP_CONFIGURATION_KEY;
    uint8_t* selectedMap = bpf_configuration_map_lookup_elem(&mapSettingKey);
    if (!selectedMap) {
        return match;
    }

    if (key.tag) {
        update_stats_with_config(skb, direction, &key, *selectedMap);
        key.tag = 0;
    }

    update_stats_with_config(skb, direction, &key, *selectedMap);
    update_app_uid_stats_map(skb, direction, &uid);
    return match;
}

DEFINE_BPF_PROG("cgroupskb/ingress/stats", AID_ROOT, AID_ROOT, bpf_cgroup_ingress)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, BPF_INGRESS);
}

DEFINE_BPF_PROG("cgroupskb/egress/stats", AID_ROOT, AID_ROOT, bpf_cgroup_egress)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, BPF_EGRESS);
}

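// The skfilter/* programs below are not cgroup-attached; as the comments in them note,
// they are matched from iptables (the bw_* chains) via the xt_bpf extension, which
// references the programs by their pinned paths.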
DEFINE_BPF_PROG("skfilter/egress/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_egress_prog)
(struct __sk_buff* skb) {
    // The clat daemon does not generate new traffic; all its traffic is already accounted
    // for on the v4-* interfaces (except for the 20 (or 28) extra bytes of IPv6 vs IPv4
    // overhead, but that can be corrected for later when merging v4-foo stats into
    // interface foo's).
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    if (sock_uid == AID_CLAT) return BPF_NOMATCH;

    uint32_t key = skb->ifindex;
    update_iface_stats_map(skb, BPF_EGRESS, &key);
    return BPF_MATCH;
}

DEFINE_BPF_PROG("skfilter/ingress/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_ingress_prog)
(struct __sk_buff* skb) {
    // Clat daemon traffic is not accounted for here, by virtue of the iptables raw
    // prerouting drop rule (in the clat_raw_PREROUTING chain), which triggers before this
    // program (in the bw_raw_PREROUTING chain).
    // It will be accounted for on the v4-* clat interface instead.
    // Keep that in mind when moving this out of iptables xt_bpf and into tc ingress (or xdp).

    uint32_t key = skb->ifindex;
    update_iface_stats_map(skb, BPF_INGRESS, &key);
    return BPF_MATCH;
}

DEFINE_BPF_PROG("skfilter/allowlist/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_allowlist_prog)
(struct __sk_buff* skb) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    if (is_system_uid(sock_uid)) return BPF_MATCH;

    // 65534 is the overflow 'nobody' uid, usually this being returned means
    // that skb->sk is NULL during RX (early decap socket lookup failure),
    // which commonly happens for incoming packets to an unconnected udp socket.
    // Additionally bpf_get_socket_cookie() returns 0 if skb->sk is NULL.
    if ((sock_uid == 65534) && !bpf_get_socket_cookie(skb) && is_received_skb(skb))
        return BPF_MATCH;

    UidOwnerValue* allowlistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
    if (allowlistMatch) return allowlistMatch->rule & HAPPY_BOX_MATCH ? BPF_MATCH : BPF_NOMATCH;
    return BPF_NOMATCH;
}

DEFINE_BPF_PROG("skfilter/denylist/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_denylist_prog)
(struct __sk_buff* skb) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    UidOwnerValue* denylistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
    if (denylistMatch) return denylistMatch->rule & PENALTY_BOX_MATCH ? BPF_MATCH : BPF_NOMATCH;
    return BPF_NOMATCH;
}

DEFINE_BPF_MAP(uid_permission_map, HASH, uint32_t, uint8_t, UID_OWNER_MAP_SIZE)

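// Attached as a cgroup sock-create filter: this runs whenever a process in the cgroup
// calls socket() for an inet socket, and returning anything other than 1 makes that
// socket() call fail with EPERM, which is how apps lacking the INTERNET permission
// are cut off.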
DEFINE_BPF_PROG_KVER("cgroupsock/inet/create", AID_ROOT, AID_ROOT, inet_socket_create,
                     KVER(4, 14, 0))
(struct bpf_sock* sk) {
    uint64_t gid_uid = bpf_get_current_uid_gid();
    /*
     * A given app is guaranteed to have the same app ID in all the profiles in
     * which it is installed, and install permission is granted to the app for all
     * users at install time, so we only check the appId part of a request uid at
     * run time. See UserHandle#isSameApp for details.
     */
    uint32_t appId = (gid_uid & 0xffffffff) % PER_USER_RANGE;
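    // Illustration (PER_USER_RANGE is 100000 in AOSP): uid 1010123, i.e. app 10123 running
    // as user 10, and uid 10123, the same app as user 0, both reduce to appId 10123 here.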
    uint8_t* permissions = bpf_uid_permission_map_lookup_elem(&appId);
    if (!permissions) {
        // UID not in map. Default to just INTERNET permission.
        return 1;
    }

    // A return value of 1 means allow, everything else means deny.
    return (*permissions & BPF_PERMISSION_INTERNET) == BPF_PERMISSION_INTERNET;
}

LICENSE("Apache 2.0");
CRITICAL("netd");