/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// The resulting .o needs to load on the Android T Beta 3 bpfloader
#define BPFLOADER_MIN_VER BPFLOADER_T_BETA3_VERSION

#define V18

#include <bpf_helpers.h>
#include <linux/bpf.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>
#include <stdbool.h>
#include <stdint.h>
#include "bpf_net_helpers.h"
#include "bpf_shared.h"

// This is defined for cgroup bpf filter only.
#define BPF_DROP_UNLESS_DNS 2
#define BPF_PASS 1
#define BPF_DROP 0

// This is used for xt_bpf program only.
#define BPF_NOMATCH 0
#define BPF_MATCH 1

#define BPF_EGRESS 0
#define BPF_INGRESS 1

#define IP_PROTO_OFF offsetof(struct iphdr, protocol)
#define IPV6_PROTO_OFF offsetof(struct ipv6hdr, nexthdr)
#define IPPROTO_IHL_OFF 0
#define TCP_FLAG_OFF 13
#define RST_OFFSET 2
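// The TCP flags byte sits at offset 13 within the TCP header (TCP_FLAG_OFF);
// within that byte the RST flag is bit 2 (RST_OFFSET), so skip_owner_match()
// below tests for a reset via (flag >> RST_OFFSET) & 1.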

// For maps netd does not need to access
#define DEFINE_BPF_MAP_NO_NETD(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries) \
    DEFINE_BPF_MAP_UGM(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, AID_ROOT, AID_NET_BW_ACCT, 0060)

// For maps netd only needs read-only access to
#define DEFINE_BPF_MAP_RO_NETD(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries) \
    DEFINE_BPF_MAP_UGM(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, AID_ROOT, AID_NET_BW_ACCT, 0460)

// For maps netd needs to be able to read and write
#define DEFINE_BPF_MAP_RW_NETD(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries) \
    DEFINE_BPF_MAP_UGM(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, AID_ROOT, AID_NET_BW_ACCT, 0660)
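
// Illustrative sketch (example_map is hypothetical, not one of the maps below):
//   DEFINE_BPF_MAP_RW_NETD(example_map, HASH, uint32_t, uint32_t, 16)
// defines a 16-entry hash map owned by AID_ROOT:AID_NET_BW_ACCT with mode 0660,
// and generates the typed accessors used throughout this file, e.g.
//   uint32_t* v = bpf_example_map_lookup_elem(&key);
//   bpf_example_map_update_elem(&key, &value, BPF_NOEXIST);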

// Bpf map arrays on creation are preinitialized to 0 and do not support deletion of a key,
// see: kernel/bpf/arraymap.c array_map_delete_elem() returns -EINVAL (from both syscall and ebpf)
// Additionally on newer kernels the bpf jit can optimize out the lookups.
// Only valid indexes are [0..CONFIGURATION_MAP_SIZE-1].
DEFINE_BPF_MAP_RO_NETD(configuration_map, ARRAY, uint32_t, uint32_t, CONFIGURATION_MAP_SIZE)

DEFINE_BPF_MAP_RW_NETD(cookie_tag_map, HASH, uint64_t, UidTagValue, COOKIE_UID_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(uid_counterset_map, HASH, uint32_t, uint8_t, UID_COUNTERSET_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(app_uid_stats_map, HASH, uint32_t, StatsValue, APP_STATS_MAP_SIZE)
DEFINE_BPF_MAP_RW_NETD(stats_map_A, HASH, StatsKey, StatsValue, STATS_MAP_SIZE)
DEFINE_BPF_MAP_RO_NETD(stats_map_B, HASH, StatsKey, StatsValue, STATS_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(iface_stats_map, HASH, uint32_t, StatsValue, IFACE_STATS_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(uid_owner_map, HASH, uint32_t, UidOwnerValue, UID_OWNER_MAP_SIZE)
DEFINE_BPF_MAP_RW_NETD(uid_permission_map, HASH, uint32_t, uint8_t, UID_OWNER_MAP_SIZE)

/* never actually used from ebpf */
DEFINE_BPF_MAP_NO_NETD(iface_index_name_map, HASH, uint32_t, IfaceValue, IFACE_INDEX_NAME_MAP_SIZE)

static __always_inline int is_system_uid(uint32_t uid) {
    // MIN_SYSTEM_UID is AID_ROOT == 0, so uint32_t is *always* >= 0
    // MAX_SYSTEM_UID is AID_NOBODY == 9999, while AID_APP_START == 10000
    return (uid < AID_APP_START);
}

/*
 * Note: this blindly assumes an MTU of 1500, and that packets > MTU are always TCP,
 * and that TCP is using the Linux default settings with TCP timestamp option enabled
 * which uses 12 TCP option bytes per frame.
 *
 * These are not unreasonable assumptions:
 *
 * The internet does not really support MTUs greater than 1500, so most TCP traffic will
 * be at that MTU, or slightly below it (worst case our upwards adjustment is too small).
 *
 * The chance our traffic isn't IP at all is basically zero, so the IP overhead correction
 * is bound to be needed.
 *
 * Furthermore, the likelihood that we're having to deal with GSO (ie. > MTU) packets that
 * are not IP/TCP is pretty small (few other things are supported by Linux) and worst case
 * our extra overhead will be slightly off, but probably still better than assuming none.
 *
 * Most servers are also Linux and thus support/default to using TCP timestamp option
 * (and indeed TCP timestamp option comes from RFC 1323 titled "TCP Extensions for High
 * Performance" which also defined TCP window scaling and is thus absolutely ancient...).
 *
 * All together this should be more correct than if we simply ignored GSO frames
 * (ie. counted them as single packets with no extra overhead)
 *
 * Especially since the number of packets is important for any future clat offload correction.
 * (which adjusts upward by 20 bytes per packet to account for ipv4 -> ipv6 header conversion)
 */
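/*
 * Worked example of the adjustment below (illustrative numbers): a 4348-byte
 * IPv6 GSO skb is assumed to be TCP with 12 option bytes, so
 * tcp_overhead = 40 + 20 + 12 = 72 and mss = 1500 - 72 = 1428; then
 * payload = 4348 - 72 = 4276, packets = ceil(4276 / 1428) = 3, and accounted
 * bytes = 72 * 3 + 4276 = 4492, i.e. 144 bytes (two extra sets of headers)
 * more than the raw skb->len.
 */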
#define DEFINE_UPDATE_STATS(the_stats_map, TypeOfKey)                                          \
    static __always_inline inline void update_##the_stats_map(struct __sk_buff* skb,           \
                                                              int direction, TypeOfKey* key) { \
        StatsValue* value = bpf_##the_stats_map##_lookup_elem(key);                            \
        if (!value) {                                                                          \
            StatsValue newValue = {};                                                          \
            bpf_##the_stats_map##_update_elem(key, &newValue, BPF_NOEXIST);                    \
            value = bpf_##the_stats_map##_lookup_elem(key);                                    \
        }                                                                                      \
        if (value) {                                                                           \
            const int mtu = 1500;                                                              \
            uint64_t packets = 1;                                                              \
            uint64_t bytes = skb->len;                                                         \
            if (bytes > mtu) {                                                                 \
                bool is_ipv6 = (skb->protocol == htons(ETH_P_IPV6));                           \
                int ip_overhead = (is_ipv6 ? sizeof(struct ipv6hdr) : sizeof(struct iphdr));   \
                int tcp_overhead = ip_overhead + sizeof(struct tcphdr) + 12;                   \
                int mss = mtu - tcp_overhead;                                                  \
                uint64_t payload = bytes - tcp_overhead;                                       \
                packets = (payload + mss - 1) / mss;                                           \
                bytes = tcp_overhead * packets + payload;                                      \
            }                                                                                  \
            if (direction == BPF_EGRESS) {                                                     \
                __sync_fetch_and_add(&value->txPackets, packets);                              \
                __sync_fetch_and_add(&value->txBytes, bytes);                                  \
            } else if (direction == BPF_INGRESS) {                                             \
                __sync_fetch_and_add(&value->rxPackets, packets);                              \
                __sync_fetch_and_add(&value->rxBytes, bytes);                                  \
            }                                                                                  \
        }                                                                                      \
    }

DEFINE_UPDATE_STATS(app_uid_stats_map, uint32_t)
DEFINE_UPDATE_STATS(iface_stats_map, uint32_t)
DEFINE_UPDATE_STATS(stats_map_A, StatsKey)
DEFINE_UPDATE_STATS(stats_map_B, StatsKey)
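// The instantiations above generate update_app_uid_stats_map(),
// update_iface_stats_map(), update_stats_map_A() and update_stats_map_B(),
// which the accounting code below calls with the appropriate key type.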

static inline bool skip_owner_match(struct __sk_buff* skb) {
    int offset = -1;
    int ret = 0;
    if (skb->protocol == htons(ETH_P_IP)) {
        offset = IP_PROTO_OFF;
        uint8_t proto, ihl;
        uint8_t flag;
        ret = bpf_skb_load_bytes(skb, offset, &proto, 1);
        if (!ret) {
            if (proto == IPPROTO_ESP) {
                return true;
            } else if (proto == IPPROTO_TCP) {
                ret = bpf_skb_load_bytes(skb, IPPROTO_IHL_OFF, &ihl, 1);
                ihl = ihl & 0x0F;
                ret = bpf_skb_load_bytes(skb, ihl * 4 + TCP_FLAG_OFF, &flag, 1);
                if (ret == 0 && (flag >> RST_OFFSET & 1)) {
                    return true;
                }
            }
        }
    } else if (skb->protocol == htons(ETH_P_IPV6)) {
        offset = IPV6_PROTO_OFF;
        uint8_t proto;
        ret = bpf_skb_load_bytes(skb, offset, &proto, 1);
        if (!ret) {
            if (proto == IPPROTO_ESP) {
                return true;
            } else if (proto == IPPROTO_TCP) {
                uint8_t flag;
                ret = bpf_skb_load_bytes(skb, sizeof(struct ipv6hdr) + TCP_FLAG_OFF, &flag, 1);
                if (ret == 0 && (flag >> RST_OFFSET & 1)) {
                    return true;
                }
            }
        }
    }
    return false;
}

static __always_inline BpfConfig getConfig(uint32_t configKey) {
    uint32_t mapSettingKey = configKey;
    BpfConfig* config = bpf_configuration_map_lookup_elem(&mapSettingKey);
    if (!config) {
        // Couldn't read configuration entry. Assume everything is disabled.
        return DEFAULT_CONFIG;
    }
    return *config;
}

// DROP_IF_SET is the set of rules that should BPF_DROP if the rule is globally enabled
// and the per-uid bit is set
#define DROP_IF_SET (STANDBY_MATCH | OEM_DENY_1_MATCH | OEM_DENY_2_MATCH | OEM_DENY_3_MATCH)
// DROP_IF_UNSET is the set of rules that should BPF_DROP if the rule is globally enabled
// and the per-uid bit is NOT set
#define DROP_IF_UNSET (DOZABLE_MATCH | POWERSAVE_MATCH | RESTRICTED_MATCH | LOW_POWER_STANDBY_MATCH)

static inline int bpf_owner_match(struct __sk_buff* skb, uint32_t uid, int direction) {
    if (skip_owner_match(skb)) return BPF_PASS;

    if (is_system_uid(uid)) return BPF_PASS;

    BpfConfig enabledRules = getConfig(UID_RULES_CONFIGURATION_KEY);

    UidOwnerValue* uidEntry = bpf_uid_owner_map_lookup_elem(&uid);
    uint32_t uidRules = uidEntry ? uidEntry->rule : 0;
    uint32_t allowed_iif = uidEntry ? uidEntry->iif : 0;

    // Warning: funky bit-wise arithmetic: in parallel, for all DROP_IF_SET/UNSET rules
    // check whether the rules are globally enabled, and if so whether the rules are
    // set/unset for the specific uid. BPF_DROP if that is the case for ANY of the rules.
    // We achieve this by masking out only the bits/rules we're interested in checking,
    // and negating (via bit-wise xor) the bits/rules that should drop if unset.
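    // For instance (illustrative walk-through): with DOZABLE_MATCH (a
    // DROP_IF_UNSET rule) globally enabled, a uid whose DOZABLE bit is clear
    // gets that bit turned on by the xor, survives both masks, and is dropped;
    // a uid with the bit set (on the dozable allowlist) has it turned off and
    // passes. For STANDBY_MATCH (a DROP_IF_SET rule) the xor leaves the uid's
    // bit untouched, so it drops exactly when the per-uid bit is set.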
    if (enabledRules & (DROP_IF_SET | DROP_IF_UNSET) & (uidRules ^ DROP_IF_UNSET)) return BPF_DROP;

    if (direction == BPF_INGRESS && skb->ifindex != 1) {
        if (uidRules & IIF_MATCH) {
            if (allowed_iif && skb->ifindex != allowed_iif) {
                // Drops packets not coming from lo nor the allowed interface
                // allowed interface=0 is a wildcard and does not drop packets
                return BPF_DROP_UNLESS_DNS;
            }
        } else if (uidRules & LOCKDOWN_VPN_MATCH) {
            // Drops packets not coming from lo and rule does not have IIF_MATCH but has
            // LOCKDOWN_VPN_MATCH
            return BPF_DROP_UNLESS_DNS;
        }
    }
    return BPF_PASS;
}

static __always_inline inline void update_stats_with_config(struct __sk_buff* skb, int direction,
                                                            StatsKey* key, uint32_t selectedMap) {
    if (selectedMap == SELECT_MAP_A) {
        update_stats_map_A(skb, direction, key);
    } else if (selectedMap == SELECT_MAP_B) {
        update_stats_map_B(skb, direction, key);
    }
}

static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb, int direction) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    uint64_t cookie = bpf_get_socket_cookie(skb);
    UidTagValue* utag = bpf_cookie_tag_map_lookup_elem(&cookie);
    uint32_t uid, tag;
    if (utag) {
        uid = utag->uid;
        tag = utag->tag;
    } else {
        uid = sock_uid;
        tag = 0;
    }

    // Always allow and never count clat traffic. Only the IPv4 traffic on the stacked
    // interface is accounted for and subject to usage restrictions.
    // TODO: remove sock_uid check once Nat464Xlat javaland adds the socket tag AID_CLAT for clat.
    if (sock_uid == AID_CLAT || uid == AID_CLAT) {
        return BPF_PASS;
    }

    int match = bpf_owner_match(skb, sock_uid, direction);
    if ((direction == BPF_EGRESS) && (match == BPF_DROP)) {
        // If an outbound packet is going to be dropped, we do not count that
        // traffic.
        return match;
    }

// Workaround for secureVPN with VpnIsolation enabled, refer to b/159994981 for details.
// Keep TAG_SYSTEM_DNS in sync with DnsResolver/include/netd_resolv/resolv.h
// and TrafficStatsConstants.java
#define TAG_SYSTEM_DNS 0xFFFFFF82
    if (tag == TAG_SYSTEM_DNS && uid == AID_DNS) {
        uid = sock_uid;
        if (match == BPF_DROP_UNLESS_DNS) match = BPF_PASS;
    } else {
        if (match == BPF_DROP_UNLESS_DNS) match = BPF_DROP;
    }

    StatsKey key = {.uid = uid, .tag = tag, .counterSet = 0, .ifaceIndex = skb->ifindex};

    uint8_t* counterSet = bpf_uid_counterset_map_lookup_elem(&uid);
    if (counterSet) key.counterSet = (uint32_t)*counterSet;

    uint32_t mapSettingKey = CURRENT_STATS_MAP_CONFIGURATION_KEY;
    uint32_t* selectedMap = bpf_configuration_map_lookup_elem(&mapSettingKey);

    // Use asm("%0 &= 1" : "+r"(match)) before return match,
    // to help kernel's bpf verifier, so that it can be 100% certain
    // that the returned value is always BPF_NOMATCH(0) or BPF_MATCH(1).
    if (!selectedMap) {
        asm("%0 &= 1" : "+r"(match));
        return match;
    }

    if (key.tag) {
        update_stats_with_config(skb, direction, &key, *selectedMap);
        key.tag = 0;
    }

    update_stats_with_config(skb, direction, &key, *selectedMap);
    update_app_uid_stats_map(skb, direction, &uid);
    asm("%0 &= 1" : "+r"(match));
    return match;
}

DEFINE_BPF_PROG("cgroupskb/ingress/stats", AID_ROOT, AID_SYSTEM, bpf_cgroup_ingress)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, BPF_INGRESS);
}

DEFINE_BPF_PROG("cgroupskb/egress/stats", AID_ROOT, AID_SYSTEM, bpf_cgroup_egress)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, BPF_EGRESS);
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_BPF_PROG("skfilter/egress/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_egress_prog)
(struct __sk_buff* skb) {
    // Clat daemon does not generate new traffic, all its traffic is accounted for already
    // on the v4-* interfaces (except for the 20 (or 28) extra bytes of IPv6 vs IPv4 overhead,
    // but that can be corrected for later when merging v4-foo stats into interface foo's).
    // TODO: remove sock_uid check once Nat464Xlat javaland adds the socket tag AID_CLAT for clat.
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    if (sock_uid == AID_CLAT) return BPF_NOMATCH;
    if (sock_uid == AID_SYSTEM) {
        uint64_t cookie = bpf_get_socket_cookie(skb);
        UidTagValue* utag = bpf_cookie_tag_map_lookup_elem(&cookie);
        if (utag && utag->uid == AID_CLAT) return BPF_NOMATCH;
    }

    uint32_t key = skb->ifindex;
    update_iface_stats_map(skb, BPF_EGRESS, &key);
    return BPF_MATCH;
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_BPF_PROG("skfilter/ingress/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_ingress_prog)
(struct __sk_buff* skb) {
    // Clat daemon traffic is not accounted by virtue of iptables raw prerouting drop rule
    // (in clat_raw_PREROUTING chain), which triggers before this (in bw_raw_PREROUTING chain).
    // It will be accounted for on the v4-* clat interface instead.
    // Keep that in mind when moving this out of iptables xt_bpf and into tc ingress (or xdp).

    uint32_t key = skb->ifindex;
    update_iface_stats_map(skb, BPF_INGRESS, &key);
    return BPF_MATCH;
}

DEFINE_BPF_PROG("schedact/ingress/account", AID_ROOT, AID_NET_ADMIN, tc_bpf_ingress_account_prog)
(struct __sk_buff* skb) {
    if (is_received_skb(skb)) {
        // Account for ingress traffic before tc drops it.
        uint32_t key = skb->ifindex;
        update_iface_stats_map(skb, BPF_INGRESS, &key);
    }
    return TC_ACT_UNSPEC;
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_BPF_PROG("skfilter/allowlist/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_allowlist_prog)
(struct __sk_buff* skb) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    if (is_system_uid(sock_uid)) return BPF_MATCH;

    // 65534 is the overflow 'nobody' uid, usually this being returned means
    // that skb->sk is NULL during RX (early decap socket lookup failure),
    // which commonly happens for incoming packets to an unconnected udp socket.
    // Additionally bpf_get_socket_cookie() returns 0 if skb->sk is NULL
    if ((sock_uid == 65534) && !bpf_get_socket_cookie(skb) && is_received_skb(skb))
        return BPF_MATCH;

    UidOwnerValue* allowlistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
    if (allowlistMatch) return allowlistMatch->rule & HAPPY_BOX_MATCH ? BPF_MATCH : BPF_NOMATCH;
    return BPF_NOMATCH;
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_BPF_PROG("skfilter/denylist/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_denylist_prog)
(struct __sk_buff* skb) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    UidOwnerValue* denylistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
    if (denylistMatch) return denylistMatch->rule & PENALTY_BOX_MATCH ? BPF_MATCH : BPF_NOMATCH;
    return BPF_NOMATCH;
}

DEFINE_BPF_PROG("cgroupsock/inet/create", AID_ROOT, AID_ROOT, inet_socket_create)
(struct bpf_sock* sk) {
    uint64_t gid_uid = bpf_get_current_uid_gid();
    /*
     * A given app is guaranteed to have the same app ID in all the profiles in
     * which it is installed, and install permission is granted to an app for all
     * users at install time, so we only check the appId part of a requesting uid
     * at run time. See UserHandle#isSameApp for detail.
     */
    uint32_t appId = (gid_uid & 0xffffffff) % AID_USER_OFFSET;  // == PER_USER_RANGE == 100000
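    // e.g. (illustrative) uid 1010123 == user 10 * 100000 + appId 10123,
    // so 1010123 % AID_USER_OFFSET yields 10123 in every profile.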
    uint8_t* permissions = bpf_uid_permission_map_lookup_elem(&appId);
    if (!permissions) {
        // UID not in map. Default to just INTERNET permission.
        return 1;
    }

    // A return value of 1 means allow, everything else means deny.
    return (*permissions & BPF_PERMISSION_INTERNET) == BPF_PERMISSION_INTERNET;
}

LICENSE("Apache 2.0");
CRITICAL("netd");