/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <linux/if.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>

// bionic kernel uapi linux/udp.h header is munged...
#define __kernel_udphdr udphdr
#include <linux/udp.h>

#include "bpf_helpers.h"
#include "bpf_net_helpers.h"
#include "bpf_tethering.h"
#include "netdbpf/bpf_shared.h"

// From kernel:include/net/ip.h
#define IP_DF 0x4000  // Flag: "Don't Fragment"

// Tethering stats, indexed by upstream interface.
DEFINE_BPF_MAP_GRW(tether_stats_map, HASH, TetherStatsKey, TetherStatsValue, 16, AID_NETWORK_STACK)

// Tethering data limit, indexed by upstream interface.
// (tethering allowed when stats[iif].rxBytes + stats[iif].txBytes < limit[iif])
DEFINE_BPF_MAP_GRW(tether_limit_map, HASH, TetherLimitKey, TetherLimitValue, 16, AID_NETWORK_STACK)

// ----- IPv6 Support -----

DEFINE_BPF_MAP_GRW(tether_downstream6_map, HASH, TetherDownstream6Key, Tether6Value, 64,
                   AID_NETWORK_STACK)

DEFINE_BPF_MAP_GRW(tether_downstream64_map, HASH, TetherDownstream64Key, TetherDownstream64Value,
                   64, AID_NETWORK_STACK)

DEFINE_BPF_MAP_GRW(tether_upstream6_map, HASH, TetherUpstream6Key, Tether6Value, 64,
                   AID_NETWORK_STACK)

DEFINE_BPF_MAP_GRW(tether_error_map, ARRAY, __u32, __u32, BPF_TETHER_ERR__MAX,
                   AID_NETWORK_STACK)

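// Count the named error in tether_error_map (if its counter slot exists) and hand the
// packet back to the regular networking stack (TC_ACT_OK) instead of dropping it.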
#define ERROR_EXIT(error) do {                                     \
    __u32 errcode = BPF_TETHER_ERR_ ## error;                      \
    __u32 *errcount = bpf_tether_error_map_lookup_elem(&errcode);  \
    if (errcount) __sync_fetch_and_add(errcount, 1);               \
    return TC_ACT_OK;                                              \
} while(0)

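// Shared IPv6 forwarding fast path.  'is_ethernet' selects whether the packet carries an
// L2 ethernet header (as opposed to a rawip interface); 'downstream' is true when traffic
// is being forwarded from the upstream interface towards the tethered device, in which case
// stats/limits are keyed by the receiving interface (skb->ifindex) and counted as rx,
// otherwise by the destination interface (v->oif) and counted as tx.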
static inline __always_inline int do_forward6(struct __sk_buff* skb, const bool is_ethernet,
                                              const bool downstream) {
    const int l2_header_size = is_ethernet ? sizeof(struct ethhdr) : 0;
    void* data = (void*)(long)skb->data;
    const void* data_end = (void*)(long)skb->data_end;
    struct ethhdr* eth = is_ethernet ? data : NULL;  // used iff is_ethernet
    struct ipv6hdr* ip6 = is_ethernet ? (void*)(eth + 1) : data;

    // Require ethernet dst mac address to be our unicast address.
    if (is_ethernet && (skb->pkt_type != PACKET_HOST)) return TC_ACT_OK;

    // Must be meta-ethernet IPv6 frame
    if (skb->protocol != htons(ETH_P_IPV6)) return TC_ACT_OK;

    // Must have (ethernet and) ipv6 header
    if (data + l2_header_size + sizeof(*ip6) > data_end) return TC_ACT_OK;

    // Ethertype - if present - must be IPv6
    if (is_ethernet && (eth->h_proto != htons(ETH_P_IPV6))) return TC_ACT_OK;

    // IP version must be 6
    if (ip6->version != 6) ERROR_EXIT(INVALID_IP_VERSION);

    // Cannot decrement during forward if already zero or would become zero;
    // let the kernel's stack handle these cases and generate appropriate ICMP errors.
    if (ip6->hop_limit <= 1) ERROR_EXIT(LOW_TTL);

    // If hardware offload is running and programming flows based on conntrack entries,
    // try not to interfere with it.
    if (ip6->nexthdr == IPPROTO_TCP) {
        struct tcphdr* tcph = (void*)(ip6 + 1);

        // Make sure we can get at the tcp header
        if (data + l2_header_size + sizeof(*ip6) + sizeof(*tcph) > data_end)
            ERROR_EXIT(INVALID_TCP_HEADER);

        // Do not offload TCP packets with any one of the SYN/FIN/RST flags
        if (tcph->syn || tcph->fin || tcph->rst) ERROR_EXIT(TCP_CONTROL_PACKET);
    }

    // Protect against forwarding packets sourced from ::1 or fe80::/64 or other weirdness.
    __be32 src32 = ip6->saddr.s6_addr32[0];
    if (src32 != htonl(0x0064ff9b) &&                        // 64:ff9b::/32 incl. XLAT464 WKP
        (src32 & htonl(0xe0000000)) != htonl(0x20000000))    // 2000::/3 Global Unicast
        ERROR_EXIT(NON_GLOBAL_SRC);

    // Protect against forwarding packets destined to ::1 or fe80::/64 or other weirdness.
    __be32 dst32 = ip6->daddr.s6_addr32[0];
    if (dst32 != htonl(0x0064ff9b) &&                        // 64:ff9b::/32 incl. XLAT464 WKP
        (dst32 & htonl(0xe0000000)) != htonl(0x20000000))    // 2000::/3 Global Unicast
        ERROR_EXIT(NON_GLOBAL_DST);

    // In the upstream direction do not forward traffic within the same /64 subnet.
    if (!downstream && (src32 == dst32) && (ip6->saddr.s6_addr32[1] == ip6->daddr.s6_addr32[1]))
        ERROR_EXIT(LOCAL_SRC_DST);

    TetherDownstream6Key kd = {
        .iif = skb->ifindex,
        .neigh6 = ip6->daddr,
    };

    TetherUpstream6Key ku = {
        .iif = skb->ifindex,
    };

    Tether6Value* v = downstream ? bpf_tether_downstream6_map_lookup_elem(&kd)
                                 : bpf_tether_upstream6_map_lookup_elem(&ku);

    // If we don't find any offload information then simply let the core stack handle it...
    if (!v) return TC_ACT_OK;

    uint32_t stat_and_limit_k = downstream ? skb->ifindex : v->oif;

    TetherStatsValue* stat_v = bpf_tether_stats_map_lookup_elem(&stat_and_limit_k);

    // If we don't have anywhere to put stats, then abort...
    if (!stat_v) ERROR_EXIT(NO_STATS_ENTRY);

    uint64_t* limit_v = bpf_tether_limit_map_lookup_elem(&stat_and_limit_k);

    // If we don't have a limit, then abort...
    if (!limit_v) ERROR_EXIT(NO_LIMIT_ENTRY);

    // Required IPv6 minimum mtu is 1280; below that it is not clear what we should do, abort...
    if (v->pmtu < IPV6_MIN_MTU) ERROR_EXIT(BELOW_IPV6_MTU);

    // Approximate handling of TCP/IPv6 overhead for incoming LRO/GRO packets: default
    // outbound path mtu of 1500 is not necessarily correct, but worst case we simply
    // undercount, which is still better than not accounting for this overhead at all.
    // Note: this really shouldn't be device/path mtu at all, but rather should be
    // derived from this particular connection's mss (ie. from gro segment size).
    // This would require a much newer kernel with newer ebpf accessors.
    // (This is also blindly assuming 12 bytes of tcp timestamp option in tcp header)
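    // For example, assuming v->pmtu == 1500: tcp_overhead = 40 + 20 + 12 = 72 and
    // mss = 1428, so a single 7272-byte GRO-aggregated skb is accounted for as
    // ceil(7200 / 1428) = 6 packets and 6 * 72 + 7200 = 7632 bytes.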
    uint64_t packets = 1;
    uint64_t bytes = skb->len;
    if (bytes > v->pmtu) {
        const int tcp_overhead = sizeof(struct ipv6hdr) + sizeof(struct tcphdr) + 12;
        const int mss = v->pmtu - tcp_overhead;
        const uint64_t payload = bytes - tcp_overhead;
        packets = (payload + mss - 1) / mss;
        bytes = tcp_overhead * packets + payload;
    }

    // Are we past the limit?  If so, then abort...
    // Note: will not overflow, since a u64 byte counter holds ~936 years of traffic even at 5Gbps.
    // Do not drop here.  Offload is just that: whenever we fail to handle
    // a packet we let the core stack deal with things.
    // (The core stack needs to handle limits correctly anyway,
    // since we don't offload all traffic in both directions)
    if (stat_v->rxBytes + stat_v->txBytes + bytes > *limit_v) ERROR_EXIT(LIMIT_REACHED);

    if (!is_ethernet) {
        // Try to inject an ethernet header, and simply return if we fail.
        // We do this even if TX interface is RAWIP and thus does not need an ethernet header,
        // because this is easier and the kernel will strip extraneous ethernet header.
        if (bpf_skb_change_head(skb, sizeof(struct ethhdr), /*flags*/ 0)) {
            __sync_fetch_and_add(downstream ? &stat_v->rxErrors : &stat_v->txErrors, 1);
            ERROR_EXIT(CHANGE_HEAD_FAILED);
        }

        // bpf_skb_change_head() invalidates all pointers - reload them
        data = (void*)(long)skb->data;
        data_end = (void*)(long)skb->data_end;
        eth = data;
        ip6 = (void*)(eth + 1);

        // I do not believe this can ever happen, but keep the verifier happy...
        if (data + sizeof(struct ethhdr) + sizeof(*ip6) > data_end) {
            __sync_fetch_and_add(downstream ? &stat_v->rxErrors : &stat_v->txErrors, 1);
            return TC_ACT_SHOT;
        }
    }

    // At this point we always have an ethernet header - which will get stripped by the
    // kernel during transmit through a rawip interface.  ie. 'eth' pointer is valid.
    // Additionally note that 'is_ethernet' and 'l2_header_size' are no longer correct.

    // CHECKSUM_COMPLETE is a 16-bit one's complement sum,
    // thus corrections for it need to be done in 16-bit chunks at even offsets.
    // IPv6 nexthdr is at offset 6, while hop limit is at offset 7
    uint8_t old_hl = ip6->hop_limit;
    --ip6->hop_limit;
    uint8_t new_hl = ip6->hop_limit;

    // bpf_csum_update() always succeeds if the skb is CHECKSUM_COMPLETE and returns an error
    // (-ENOTSUPP) if it isn't.
    bpf_csum_update(skb, 0xFFFF - ntohs(old_hl) + ntohs(new_hl));

    __sync_fetch_and_add(downstream ? &stat_v->rxPackets : &stat_v->txPackets, packets);
    __sync_fetch_and_add(downstream ? &stat_v->rxBytes : &stat_v->txBytes, bytes);

    // Overwrite any mac header with the new one
    // For a rawip tx interface it will simply be a bunch of zeroes and later stripped.
    *eth = v->macHeader;

    // Redirect to forwarded interface.
    //
    // Note that bpf_redirect() cannot fail unless you pass invalid flags.
    // The redirect actually happens after the ebpf program has already terminated,
    // and can fail for example for mtu reasons at that point in time, but there's nothing
    // we can do about it here.
    return bpf_redirect(v->oif, 0 /* this is effectively BPF_F_EGRESS */);
}

DEFINE_BPF_PROG("schedcls/tether_downstream6_ether", AID_ROOT, AID_NETWORK_STACK,
                sched_cls_tether_downstream6_ether)
(struct __sk_buff* skb) {
    return do_forward6(skb, /* is_ethernet */ true, /* downstream */ true);
}

DEFINE_BPF_PROG("schedcls/tether_upstream6_ether", AID_ROOT, AID_NETWORK_STACK,
                sched_cls_tether_upstream6_ether)
(struct __sk_buff* skb) {
    return do_forward6(skb, /* is_ethernet */ true, /* downstream */ false);
}

// Note: section names must be unique to prevent programs from appending to each other,
// so instead the bpf loader will strip everything past the final $ symbol when actually
// pinning the program into the filesystem.
//
// bpf_skb_change_head() is only present on 4.14+ and 2 trivial kernel patches are needed:
//   ANDROID: net: bpf: Allow TC programs to call BPF_FUNC_skb_change_head
//   ANDROID: net: bpf: permit redirect from ingress L3 to egress L2 devices at near max mtu
// (the first of those has already been upstreamed)
//
// 5.4 kernel support was only added to Android Common Kernel in R,
// and thus a 5.4 kernel always supports this.
//
// Hence, these mandatory (must load successfully) implementations for 5.4+ kernels:
DEFINE_BPF_PROG_KVER("schedcls/tether_downstream6_rawip$5_4", AID_ROOT, AID_NETWORK_STACK,
                     sched_cls_tether_downstream6_rawip_5_4, KVER(5, 4, 0))
(struct __sk_buff* skb) {
    return do_forward6(skb, /* is_ethernet */ false, /* downstream */ true);
}

DEFINE_BPF_PROG_KVER("schedcls/tether_upstream6_rawip$5_4", AID_ROOT, AID_NETWORK_STACK,
                     sched_cls_tether_upstream6_rawip_5_4, KVER(5, 4, 0))
(struct __sk_buff* skb) {
    return do_forward6(skb, /* is_ethernet */ false, /* downstream */ false);
}

// and these identical optional (may fail to load) implementations for [4.14..5.4) patched kernels:
DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_downstream6_rawip$4_14",
                                    AID_ROOT, AID_NETWORK_STACK,
                                    sched_cls_tether_downstream6_rawip_4_14,
                                    KVER(4, 14, 0), KVER(5, 4, 0))
(struct __sk_buff* skb) {
    return do_forward6(skb, /* is_ethernet */ false, /* downstream */ true);
}

DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_upstream6_rawip$4_14",
                                    AID_ROOT, AID_NETWORK_STACK,
                                    sched_cls_tether_upstream6_rawip_4_14,
                                    KVER(4, 14, 0), KVER(5, 4, 0))
(struct __sk_buff* skb) {
    return do_forward6(skb, /* is_ethernet */ false, /* downstream */ false);
}

// and define no-op stubs for [4.9,4.14) and unpatched [4.14,5.4) kernels.
// (if the above real 4.14+ program loaded successfully, then bpfloader will have already pinned
// it at the same location this one would be pinned at and will thus skip loading this stub)
DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream6_rawip$stub", AID_ROOT, AID_NETWORK_STACK,
                           sched_cls_tether_downstream6_rawip_stub, KVER_NONE, KVER(5, 4, 0))
(struct __sk_buff* skb) {
    return TC_ACT_OK;
}

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream6_rawip$stub", AID_ROOT, AID_NETWORK_STACK,
                           sched_cls_tether_upstream6_rawip_stub, KVER_NONE, KVER(5, 4, 0))
(struct __sk_buff* skb) {
    return TC_ACT_OK;
}

// ----- IPv4 Support -----

DEFINE_BPF_MAP_GRW(tether_downstream4_map, HASH, Tether4Key, Tether4Value, 64, AID_NETWORK_STACK)

DEFINE_BPF_MAP_GRW(tether_upstream4_map, HASH, Tether4Key, Tether4Value, 64, AID_NETWORK_STACK)

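// IPv4 analogue of do_forward6() above, with the same is_ethernet/downstream conventions.
// (Actual forwarding is not wired up yet - see the TODOs at the end of this function.)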
static inline __always_inline int do_forward4(struct __sk_buff* skb, const bool is_ethernet,
                                              const bool downstream) {
    const int l2_header_size = is_ethernet ? sizeof(struct ethhdr) : 0;
    void* data = (void*)(long)skb->data;
    const void* data_end = (void*)(long)skb->data_end;
    struct ethhdr* eth = is_ethernet ? data : NULL;  // used iff is_ethernet
    struct iphdr* ip = is_ethernet ? (void*)(eth + 1) : data;

    // Require ethernet dst mac address to be our unicast address.
    if (is_ethernet && (skb->pkt_type != PACKET_HOST)) return TC_ACT_OK;

    // Must be meta-ethernet IPv4 frame
    if (skb->protocol != htons(ETH_P_IP)) return TC_ACT_OK;

    // Must have (ethernet and) ipv4 header
    if (data + l2_header_size + sizeof(*ip) > data_end) return TC_ACT_OK;

    // Ethertype - if present - must be IPv4
    if (is_ethernet && (eth->h_proto != htons(ETH_P_IP))) return TC_ACT_OK;

    // IP version must be 4
    if (ip->version != 4) return TC_ACT_OK;

    // We cannot handle IP options, just standard 20 byte == 5 dword minimal IPv4 header
    if (ip->ihl != 5) return TC_ACT_OK;

    // Calculate the IPv4 one's complement checksum of the IPv4 header.
    __wsum sum4 = 0;
    for (int i = 0; i < sizeof(*ip) / sizeof(__u16); ++i) {
        sum4 += ((__u16*)ip)[i];
    }
    // Note that sum4 is guaranteed to be non-zero by virtue of ip->version == 4
    sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse u32 into range 1 .. 0x1FFFE
    sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse any potential carry into u16
    // for a correct checksum we should get *a* zero, but sum4 must be positive, ie 0xFFFF
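    // (For example, if the ten 16-bit header words sum to 0x4FFFB, the first fold gives
    //  0xFFFB + 0x4 = 0xFFFF and the second fold leaves it unchanged, so the header verifies.)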
    if (sum4 != 0xFFFF) return TC_ACT_OK;

    // Minimum IPv4 total length is the size of the header
    if (ntohs(ip->tot_len) < sizeof(*ip)) return TC_ACT_OK;

    // We are incapable of dealing with IPv4 fragments
    if (ip->frag_off & ~htons(IP_DF)) return TC_ACT_OK;

    // Cannot decrement during forward if already zero or would become zero;
    // let the kernel's stack handle these cases and generate appropriate ICMP errors.
    if (ip->ttl <= 1) return TC_ACT_OK;

    const bool is_tcp = (ip->protocol == IPPROTO_TCP);

    // We do not support anything besides TCP and UDP
    if (!is_tcp && (ip->protocol != IPPROTO_UDP)) return TC_ACT_OK;

    struct tcphdr* tcph = is_tcp ? (void*)(ip + 1) : NULL;
    struct udphdr* udph = is_tcp ? NULL : (void*)(ip + 1);

    if (is_tcp) {
        // Make sure we can get at the tcp header
        if (data + l2_header_size + sizeof(*ip) + sizeof(*tcph) > data_end) return TC_ACT_OK;

        // If hardware offload is running and programming flows based on conntrack entries, try not
        // to interfere with it, so do not offload TCP packets with any one of the SYN/FIN/RST flags
        if (tcph->syn || tcph->fin || tcph->rst) return TC_ACT_OK;
    } else {  // UDP
        // Make sure we can get at the udp header
        if (data + l2_header_size + sizeof(*ip) + sizeof(*udph) > data_end) return TC_ACT_OK;
    }

    Tether4Key k = {
        .iif = skb->ifindex,
        .l4Proto = ip->protocol,
        .src4.s_addr = ip->saddr,
        .dst4.s_addr = ip->daddr,
        .srcPort = is_tcp ? tcph->source : udph->source,
        .dstPort = is_tcp ? tcph->dest : udph->dest,
    };
    if (is_ethernet) for (int i = 0; i < ETH_ALEN; ++i) k.dstMac[i] = eth->h_dest[i];

    Tether4Value* v = downstream ? bpf_tether_downstream4_map_lookup_elem(&k)
                                 : bpf_tether_upstream4_map_lookup_elem(&k);

    // If we don't find any offload information then simply let the core stack handle it...
    if (!v) return TC_ACT_OK;

    uint32_t stat_and_limit_k = downstream ? skb->ifindex : v->oif;

    TetherStatsValue* stat_v = bpf_tether_stats_map_lookup_elem(&stat_and_limit_k);

    // If we don't have anywhere to put stats, then abort...
    if (!stat_v) return TC_ACT_OK;

    uint64_t* limit_v = bpf_tether_limit_map_lookup_elem(&stat_and_limit_k);

    // If we don't have a limit, then abort...
    if (!limit_v) return TC_ACT_OK;

    // Required IPv4 minimum mtu is 68; below that it is not clear what we should do, abort...
    if (v->pmtu < 68) return TC_ACT_OK;

    // Approximate handling of TCP/IPv4 overhead for incoming LRO/GRO packets: default
    // outbound path mtu of 1500 is not necessarily correct, but worst case we simply
    // undercount, which is still better than not accounting for this overhead at all.
    // Note: this really shouldn't be device/path mtu at all, but rather should be
    // derived from this particular connection's mss (ie. from gro segment size).
    // This would require a much newer kernel with newer ebpf accessors.
    // (This is also blindly assuming 12 bytes of tcp timestamp option in tcp header)
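    // For example, assuming v->pmtu == 1500: tcp_overhead = 20 + 20 + 12 = 52 and
    // mss = 1448, so a single 7272-byte GRO-aggregated skb is accounted for as
    // ceil(7220 / 1448) = 5 packets and 5 * 52 + 7220 = 7480 bytes.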
    uint64_t packets = 1;
    uint64_t bytes = skb->len;
    if (bytes > v->pmtu) {
        const int tcp_overhead = sizeof(struct iphdr) + sizeof(struct tcphdr) + 12;
        const int mss = v->pmtu - tcp_overhead;
        const uint64_t payload = bytes - tcp_overhead;
        packets = (payload + mss - 1) / mss;
        bytes = tcp_overhead * packets + payload;
    }

    // Are we past the limit?  If so, then abort...
    // Note: will not overflow, since a u64 byte counter holds ~936 years of traffic even at 5Gbps.
    // Do not drop here.  Offload is just that: whenever we fail to handle
    // a packet we let the core stack deal with things.
    // (The core stack needs to handle limits correctly anyway,
    // since we don't offload all traffic in both directions)
    if (stat_v->rxBytes + stat_v->txBytes + bytes > *limit_v) return TC_ACT_OK;

    // TODO: replace Errors with Packets once implemented
    __sync_fetch_and_add(downstream ? &stat_v->rxErrors : &stat_v->txErrors, packets);
    __sync_fetch_and_add(downstream ? &stat_v->rxBytes : &stat_v->txBytes, bytes);

    // TODO: not actually implemented yet
    return TC_ACT_OK;
}

// Real implementations for 5.9+ kernels

DEFINE_BPF_PROG_KVER("schedcls/tether_downstream4_ether$5_9", AID_ROOT, AID_NETWORK_STACK,
                     sched_cls_tether_downstream4_ether_5_9, KVER(5, 9, 0))
(struct __sk_buff* skb) {
    return do_forward4(skb, /* is_ethernet */ true, /* downstream */ true);
}

DEFINE_BPF_PROG_KVER("schedcls/tether_downstream4_rawip$5_9", AID_ROOT, AID_NETWORK_STACK,
                     sched_cls_tether_downstream4_rawip_5_9, KVER(5, 9, 0))
(struct __sk_buff* skb) {
    return do_forward4(skb, /* is_ethernet */ false, /* downstream */ true);
}

DEFINE_BPF_PROG_KVER("schedcls/tether_upstream4_ether$5_9", AID_ROOT, AID_NETWORK_STACK,
                     sched_cls_tether_upstream4_ether_5_9, KVER(5, 9, 0))
(struct __sk_buff* skb) {
    return do_forward4(skb, /* is_ethernet */ true, /* downstream */ false);
}

DEFINE_BPF_PROG_KVER("schedcls/tether_upstream4_rawip$5_9", AID_ROOT, AID_NETWORK_STACK,
                     sched_cls_tether_upstream4_rawip_5_9, KVER(5, 9, 0))
(struct __sk_buff* skb) {
    return do_forward4(skb, /* is_ethernet */ false, /* downstream */ false);
}

// Placeholder implementations for older pre-5.9 kernels

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_ether$stub", AID_ROOT, AID_NETWORK_STACK,
                           sched_cls_tether_downstream4_ether_stub, KVER_NONE, KVER(5, 9, 0))
(struct __sk_buff* skb) {
    return TC_ACT_OK;
}

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_rawip$stub", AID_ROOT, AID_NETWORK_STACK,
                           sched_cls_tether_downstream4_rawip_stub, KVER_NONE, KVER(5, 9, 0))
(struct __sk_buff* skb) {
    return TC_ACT_OK;
}

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_ether$stub", AID_ROOT, AID_NETWORK_STACK,
                           sched_cls_tether_upstream4_ether_stub, KVER_NONE, KVER(5, 9, 0))
(struct __sk_buff* skb) {
    return TC_ACT_OK;
}

DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_rawip$stub", AID_ROOT, AID_NETWORK_STACK,
                           sched_cls_tether_upstream4_rawip_stub, KVER_NONE, KVER(5, 9, 0))
(struct __sk_buff* skb) {
    return TC_ACT_OK;
}

// ----- XDP Support -----

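// Convenience wrapper: each XDP program below is restricted to 5.9+ kernels and owned by
// AID_ROOT / AID_NETWORK_STACK; for now they are all pass-through placeholders.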
#define DEFINE_XDP_PROG(str, func) \
    DEFINE_BPF_PROG_KVER(str, AID_ROOT, AID_NETWORK_STACK, func, KVER(5, 9, 0))(struct xdp_md *ctx)

DEFINE_XDP_PROG("xdp/tether_downstream_ether",
                xdp_tether_downstream_ether) {
    return XDP_PASS;
}

DEFINE_XDP_PROG("xdp/tether_downstream_rawip",
                xdp_tether_downstream_rawip) {
    return XDP_PASS;
}

DEFINE_XDP_PROG("xdp/tether_upstream_ether",
                xdp_tether_upstream_ether) {
    return XDP_PASS;
}

DEFINE_XDP_PROG("xdp/tether_upstream_rawip",
                xdp_tether_upstream_rawip) {
    return XDP_PASS;
}

LICENSE("Apache 2.0");
CRITICAL("netd");