/*
2 * Copyright (C) 2019 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Maciej Żenczykowskic2237d82019-04-02 03:59:51 -070017#include <linux/bpf.h>
18#include <linux/if.h>
19#include <linux/if_ether.h>
20#include <linux/in.h>
21#include <linux/in6.h>
22#include <linux/ip.h>
23#include <linux/ipv6.h>
Maciej Żenczykowskidca6ce72019-03-07 16:54:12 -080024#include <linux/pkt_cls.h>
Maciej Żenczykowskic2237d82019-04-02 03:59:51 -070025#include <linux/swab.h>
Maciej Żenczykowskic2237d82019-04-02 03:59:51 -070026#include <stdbool.h>
27#include <stdint.h>
28
Maciej Żenczykowski59b62e62020-07-15 20:06:17 +000029// bionic kernel uapi linux/udp.h header is munged...
30#define __kernel_udphdr udphdr
31#include <linux/udp.h>
32
Maciej Żenczykowskidca6ce72019-03-07 16:54:12 -080033#include "bpf_helpers.h"
Maciej Żenczykowski3cfcdd62019-10-31 01:03:39 -070034#include "bpf_net_helpers.h"
Maciej Żenczykowskidca6ce72019-03-07 16:54:12 -080035#include "netdbpf/bpf_shared.h"
36
Maciej Żenczykowskic2237d82019-04-02 03:59:51 -070037// From kernel:include/net/ip.h
38#define IP_DF 0x4000 // Flag: "Don't Fragment"
39
// Hash map of ingress CLAT translation state (max 16 entries), looked up by
// nat64() below; keyed by ClatIngressKey, populated from userspace.
DEFINE_BPF_MAP(clat_ingress_map, HASH, ClatIngressKey, ClatIngressValue, 16)
Maciej Żenczykowskidca6ce72019-03-07 16:54:12 -080041
// Translate an ingress IPv6 CLAT packet into IPv4, in place.
//
// skb         - the packet buffer (a meta-ethernet IPv6 frame)
// is_ethernet - true iff the frame starts with an ethernet header
//               (rawip interfaces start directly at the IPv6 header)
//
// Returns a TC_ACT_* verdict: TC_ACT_OK to let the packet continue untouched
// (not for us, or no map entry - clatd will handle it), TC_ACT_SHOT to drop,
// or the result of bpf_redirect() after a successful translation.
static inline __always_inline int nat64(struct __sk_buff* skb, bool is_ethernet) {
    // Bytes of L2 header preceding the IPv6 header: ethernet header or nothing.
    const int l2_header_size = is_ethernet ? sizeof(struct ethhdr) : 0;
    void* data = (void*)(long)skb->data;
    const void* data_end = (void*)(long)skb->data_end;
    const struct ethhdr* const eth = is_ethernet ? data : NULL;  // used iff is_ethernet
    const struct ipv6hdr* const ip6 = is_ethernet ? (void*)(eth + 1) : data;

    // Must be meta-ethernet IPv6 frame
    if (skb->protocol != htons(ETH_P_IPV6)) return TC_ACT_OK;

    // Must have (ethernet and) ipv6 header
    if (data + l2_header_size + sizeof(*ip6) > data_end) return TC_ACT_OK;

    // Ethertype - if present - must be IPv6
    if (is_ethernet && (eth->h_proto != htons(ETH_P_IPV6))) return TC_ACT_OK;

    // IP version must be 6
    if (ip6->version != 6) return TC_ACT_OK;

    // Maximum IPv6 payload length that can be translated to IPv4
    // (the IPv4 total-length field must hold payload + 20-byte IPv4 header).
    if (ntohs(ip6->payload_len) > 0xFFFF - sizeof(struct iphdr)) return TC_ACT_OK;

    switch (ip6->nexthdr) {
        case IPPROTO_TCP:  // For TCP & UDP the checksum neutrality of the chosen IPv6
        case IPPROTO_UDP:  // address means there is no need to update their checksums.
        case IPPROTO_GRE:  // We do not need to bother looking at GRE/ESP headers,
        case IPPROTO_ESP:  // since there is never a checksum to update.
            break;

        default:  // do not know how to handle anything else
            return TC_ACT_OK;
    }

    // Look up translation state keyed by: ingress interface, the /96 prefix
    // (upper 96 bits of the IPv6 source address) and the local IPv6 destination.
    ClatIngressKey k = {
            .iif = skb->ifindex,
            .pfx96.in6_u.u6_addr32 =
                    {
                            ip6->saddr.in6_u.u6_addr32[0],
                            ip6->saddr.in6_u.u6_addr32[1],
                            ip6->saddr.in6_u.u6_addr32[2],
                    },
            .local6 = ip6->daddr,
    };

    ClatIngressValue* v = bpf_clat_ingress_map_lookup_elem(&k);

    // No state for this flow: leave the packet alone (clatd handles it in userspace).
    if (!v) return TC_ACT_OK;

    struct ethhdr eth2;  // used iff is_ethernet
    if (is_ethernet) {
        eth2 = *eth;                     // Copy over the ethernet header (src/dst mac)
        eth2.h_proto = htons(ETH_P_IP);  // But replace the ethertype
    }

    // Build the replacement IPv4 header from the IPv6 header fields.
    struct iphdr ip = {
            .version = 4,                                                      // u4
            .ihl = sizeof(struct iphdr) / sizeof(__u32),                       // u4
            .tos = (ip6->priority << 4) + (ip6->flow_lbl[0] >> 4),             // u8
            .tot_len = htons(ntohs(ip6->payload_len) + sizeof(struct iphdr)),  // u16
            .id = 0,                                                           // u16
            .frag_off = htons(IP_DF),                                          // u16
            .ttl = ip6->hop_limit,                                             // u8
            .protocol = ip6->nexthdr,                                          // u8
            .check = 0,                                                        // u16
            .saddr = ip6->saddr.in6_u.u6_addr32[3],                            // u32
            .daddr = v->local4.s_addr,                                         // u32
    };

    // Calculate the IPv4 one's complement checksum of the IPv4 header.
    __wsum sum4 = 0;
    for (int i = 0; i < sizeof(ip) / sizeof(__u16); ++i) {
        sum4 += ((__u16*)&ip)[i];
    }
    // Note that sum4 is guaranteed to be non-zero by virtue of ip.version == 4
    sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse u32 into range 1 .. 0x1FFFE
    sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse any potential carry into u16
    ip.check = (__u16)~sum4;                // sum4 cannot be zero, so this is never 0xFFFF

    // Calculate the *negative* IPv6 16-bit one's complement checksum of the IPv6 header.
    __wsum sum6 = 0;
    // We'll end up with a non-zero sum due to ip6->version == 6 (which has '0' bits)
    for (int i = 0; i < sizeof(*ip6) / sizeof(__u16); ++i) {
        sum6 += ~((__u16*)ip6)[i];  // note the bitwise negation
    }

    // Note that there is no L4 checksum update: we are relying on the checksum neutrality
    // of the ipv6 address chosen by netd's ClatdController.

    // Packet mutations begin - point of no return, but if this first modification fails
    // the packet is probably still pristine, so let clatd handle it.
    if (bpf_skb_change_proto(skb, htons(ETH_P_IP), 0)) return TC_ACT_OK;

    // This takes care of updating the skb->csum field for a CHECKSUM_COMPLETE packet.
    //
    // In such a case, skb->csum is a 16-bit one's complement sum of the entire payload,
    // thus we need to subtract out the ipv6 header's sum, and add in the ipv4 header's sum.
    // However, by construction of ip.check above the checksum of an ipv4 header is zero.
    // Thus we only need to subtract the ipv6 header's sum, which is the same as adding
    // in the sum of the bitwise negation of the ipv6 header.
    //
    // bpf_csum_update() always succeeds if the skb is CHECKSUM_COMPLETE and returns an error
    // (-ENOTSUPP) if it isn't. So we just ignore the return code.
    //
    // if (skb->ip_summed == CHECKSUM_COMPLETE)
    //     return (skb->csum = csum_add(skb->csum, csum));
    // else
    //     return -ENOTSUPP;
    bpf_csum_update(skb, sum6);

    // bpf_skb_change_proto() invalidates all pointers - reload them.
    data = (void*)(long)skb->data;
    data_end = (void*)(long)skb->data_end;

    // I cannot think of any valid way for this error condition to trigger, however I do
    // believe the explicit check is required to keep the in kernel ebpf verifier happy.
    if (data + l2_header_size + sizeof(struct iphdr) > data_end) return TC_ACT_SHOT;

    if (is_ethernet) {
        struct ethhdr* new_eth = data;

        // Copy over the updated ethernet header
        *new_eth = eth2;

        // Copy over the new ipv4 header.
        *(struct iphdr*)(new_eth + 1) = ip;
    } else {
        // Copy over the new ipv4 header without an ethernet header.
        *(struct iphdr*)data = ip;
    }

    // Redirect, possibly back to same interface, so tcpdump sees packet twice.
    if (v->oif) return bpf_redirect(v->oif, BPF_F_INGRESS);

    // Just let it through, tcpdump will not see IPv4 packet.
    return TC_ACT_OK;
}
178
Maciej Żenczykowskidca6ce72019-03-07 16:54:12 -0800179SEC("schedcls/ingress/clat_ether")
180int sched_cls_ingress_clat_ether(struct __sk_buff* skb) {
Maciej Żenczykowskic2237d82019-04-02 03:59:51 -0700181 return nat64(skb, true);
Maciej Żenczykowskidca6ce72019-03-07 16:54:12 -0800182}
183
184SEC("schedcls/ingress/clat_rawip")
185int sched_cls_ingress_clat_rawip(struct __sk_buff* skb) {
Maciej Żenczykowskic2237d82019-04-02 03:59:51 -0700186 return nat64(skb, false);
Maciej Żenczykowskidca6ce72019-03-07 16:54:12 -0800187}
188
// Hash map of egress CLAT translation state (max 16 entries), looked up by
// the egress program below; keyed by ClatEgressKey, populated from userspace.
DEFINE_BPF_MAP(clat_egress_map, HASH, ClatEgressKey, ClatEgressValue, 16)
190
// TC egress hook for ethernet interfaces: egress translation is not
// implemented for ethernet framing, so every packet passes through untouched
// (clatd handles translation in userspace instead).
SEC("schedcls/egress/clat_ether")
int sched_cls_egress_clat_ether(struct __sk_buff* skb) {
    return TC_ACT_OK;
}
195
// TC egress hook for raw-ip interfaces: translate an egress IPv4 CLAT packet
// into IPv6 in place and redirect it out the real (IPv6) interface.
//
// Returns TC_ACT_OK to fall back to userspace clatd (packet not for us, has
// options/fragments, zero UDP checksum, bad header checksum, or no map
// entry), TC_ACT_SHOT on a should-be-impossible bounds failure after
// mutation, or the result of bpf_redirect() on success.
SEC("schedcls/egress/clat_rawip")
int sched_cls_egress_clat_rawip(struct __sk_buff* skb) {
    void* data = (void*)(long)skb->data;
    const void* data_end = (void*)(long)skb->data_end;
    const struct iphdr* const ip4 = data;

    // Must be meta-ethernet IPv4 frame
    if (skb->protocol != htons(ETH_P_IP)) return TC_ACT_OK;

    // Must have ipv4 header
    if (data + sizeof(*ip4) > data_end) return TC_ACT_OK;

    // IP version must be 4
    if (ip4->version != 4) return TC_ACT_OK;

    // We cannot handle IP options, just standard 20 byte == 5 dword minimal IPv4 header
    if (ip4->ihl != 5) return TC_ACT_OK;

    // Calculate the IPv4 one's complement checksum of the IPv4 header.
    __wsum sum4 = 0;
    for (int i = 0; i < sizeof(*ip4) / sizeof(__u16); ++i) {
        sum4 += ((__u16*)ip4)[i];
    }
    // Note that sum4 is guaranteed to be non-zero by virtue of ip4->version == 4
    sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse u32 into range 1 .. 0x1FFFE
    sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse any potential carry into u16
    // for a correct checksum we should get *a* zero, but sum4 must be positive, ie 0xFFFF
    if (sum4 != 0xFFFF) return TC_ACT_OK;

    // Minimum IPv4 total length is the size of the header
    if (ntohs(ip4->tot_len) < sizeof(*ip4)) return TC_ACT_OK;

    // We are incapable of dealing with IPv4 fragments
    if (ip4->frag_off & ~htons(IP_DF)) return TC_ACT_OK;

    switch (ip4->protocol) {
        case IPPROTO_TCP:  // For TCP & UDP the checksum neutrality of the chosen IPv6
        case IPPROTO_GRE:  // address means there is no need to update their checksums.
        case IPPROTO_ESP:  // We do not need to bother looking at GRE/ESP headers,
            break;         // since there is never a checksum to update.

        case IPPROTO_UDP:  // See above comment, but must also have UDP header...
            if (data + sizeof(*ip4) + sizeof(struct udphdr) > data_end) return TC_ACT_OK;
            const struct udphdr* uh = (const struct udphdr*)(ip4 + 1);
            // If IPv4/UDP checksum is 0 then fallback to clatd so it can calculate the
            // checksum. Otherwise the network or more likely the NAT64 gateway might
            // drop the packet because in most cases IPv6/UDP packets with a zero checksum
            // are invalid. See RFC 6935. TODO: calculate checksum via bpf_csum_diff()
            if (!uh->check) return TC_ACT_OK;
            break;

        default:  // do not know how to handle anything else
            return TC_ACT_OK;
    }

    // Look up translation state keyed by ingress interface and local IPv4 source.
    ClatEgressKey k = {
            .iif = skb->ifindex,
            .local4.s_addr = ip4->saddr,
    };

    ClatEgressValue* v = bpf_clat_egress_map_lookup_elem(&k);

    if (!v) return TC_ACT_OK;

    // Translating without redirecting doesn't make sense.
    if (!v->oif) return TC_ACT_OK;

    // This implementation is currently limited to rawip.
    if (v->oifIsEthernet) return TC_ACT_OK;

    // Build the replacement IPv6 header; source is the configured local IPv6
    // address, destination is the /96 NAT64 prefix + the IPv4 destination.
    struct ipv6hdr ip6 = {
            .version = 6,                                    // __u8:4
            .priority = ip4->tos >> 4,                       // __u8:4
            .flow_lbl = {(ip4->tos & 0xF) << 4, 0, 0},       // __u8[3]
            .payload_len = htons(ntohs(ip4->tot_len) - 20),  // __be16 (20 == IPv4 header, ihl==5 checked above)
            .nexthdr = ip4->protocol,                        // __u8
            .hop_limit = ip4->ttl,                           // __u8
            .saddr = v->local6,                              // struct in6_addr
            .daddr = v->pfx96,                               // struct in6_addr
    };
    ip6.daddr.in6_u.u6_addr32[3] = ip4->daddr;

    // Calculate the IPv6 16-bit one's complement checksum of the IPv6 header.
    __wsum sum6 = 0;
    // We'll end up with a non-zero sum due to ip6.version == 6
    for (int i = 0; i < sizeof(ip6) / sizeof(__u16); ++i) {
        sum6 += ((__u16*)&ip6)[i];
    }

    // Note that there is no L4 checksum update: we are relying on the checksum neutrality
    // of the ipv6 address chosen by netd's ClatdController.

    // Packet mutations begin - point of no return, but if this first modification fails
    // the packet is probably still pristine, so let clatd handle it.
    if (bpf_skb_change_proto(skb, htons(ETH_P_IPV6), 0)) return TC_ACT_OK;

    // This takes care of updating the skb->csum field for a CHECKSUM_COMPLETE packet.
    //
    // In such a case, skb->csum is a 16-bit one's complement sum of the entire payload,
    // thus we need to subtract out the ipv4 header's sum, and add in the ipv6 header's sum.
    // However, we've already verified the ipv4 checksum is correct and thus 0.
    // Thus we only need to add the ipv6 header's sum.
    //
    // bpf_csum_update() always succeeds if the skb is CHECKSUM_COMPLETE and returns an error
    // (-ENOTSUPP) if it isn't. So we just ignore the return code (see above for more details).
    bpf_csum_update(skb, sum6);

    // bpf_skb_change_proto() invalidates all pointers - reload them.
    data = (void*)(long)skb->data;
    data_end = (void*)(long)skb->data_end;

    // I cannot think of any valid way for this error condition to trigger, however I do
    // believe the explicit check is required to keep the in kernel ebpf verifier happy.
    if (data + sizeof(ip6) > data_end) return TC_ACT_SHOT;

    // Copy over the new ipv6 header without an ethernet header.
    *(struct ipv6hdr*)data = ip6;

    // Redirect to non v4-* interface. Tcpdump only sees packet after this redirect.
    return bpf_redirect(v->oif, 0 /* this is effectively BPF_F_EGRESS */);
}
317
Treehugger Robot2b9df0c2020-03-20 23:23:27 +0000318LICENSE("Apache 2.0");
Maciej Żenczykowskia60b74e2020-06-15 08:25:49 +0000319CRITICAL("netd");