Snap for 7497286 from fadd7b7a334004e2785bdd2467c3586bdf1061d0 to sc-release
Change-Id: I90f8198ade91ab2d955d55541f79c9d139ed2709
diff --git a/Tethering/apishim/30/com/android/networkstack/tethering/apishim/api30/BpfCoordinatorShimImpl.java b/Tethering/apishim/30/com/android/networkstack/tethering/apishim/api30/BpfCoordinatorShimImpl.java
index 33f1c29..a33af61 100644
--- a/Tethering/apishim/30/com/android/networkstack/tethering/apishim/api30/BpfCoordinatorShimImpl.java
+++ b/Tethering/apishim/30/com/android/networkstack/tethering/apishim/api30/BpfCoordinatorShimImpl.java
@@ -33,6 +33,8 @@
import com.android.networkstack.tethering.Tether4Value;
import com.android.networkstack.tethering.TetherStatsValue;
+import java.util.function.BiConsumer;
+
/**
* Bpf coordinator class for API shims.
*/
@@ -161,6 +163,12 @@
}
@Override
+ public void tetherOffloadRuleForEach(boolean downstream,
+ @NonNull BiConsumer<Tether4Key, Tether4Value> action) {
+ /* no op */
+ }
+
+ @Override
public boolean attachProgram(String iface, boolean downstream) {
/* no op */
return true;
diff --git a/Tethering/apishim/31/com/android/networkstack/tethering/apishim/api31/BpfCoordinatorShimImpl.java b/Tethering/apishim/31/com/android/networkstack/tethering/apishim/api31/BpfCoordinatorShimImpl.java
index 74ddcbc..611c828 100644
--- a/Tethering/apishim/31/com/android/networkstack/tethering/apishim/api31/BpfCoordinatorShimImpl.java
+++ b/Tethering/apishim/31/com/android/networkstack/tethering/apishim/api31/BpfCoordinatorShimImpl.java
@@ -47,6 +47,7 @@
import java.io.FileDescriptor;
import java.io.IOException;
+import java.util.function.BiConsumer;
/**
* Bpf coordinator class for API shims.
@@ -380,10 +381,7 @@
try {
if (downstream) {
- if (!mBpfDownstream4Map.deleteEntry(key)) {
- mLog.e("Could not delete entry (key: " + key + ")");
- return false;
- }
+ if (!mBpfDownstream4Map.deleteEntry(key)) return false; // Rule did not exist
// Decrease the rule count while a deleting rule is not using a given upstream
// interface anymore.
@@ -401,19 +399,32 @@
mRule4CountOnUpstream.put(upstreamIfindex, count);
}
} else {
- mBpfUpstream4Map.deleteEntry(key);
+ if (!mBpfUpstream4Map.deleteEntry(key)) return false; // Rule did not exist
}
} catch (ErrnoException e) {
- // Silent if the rule did not exist.
- if (e.errno != OsConstants.ENOENT) {
- mLog.e("Could not delete entry: ", e);
- return false;
- }
+ mLog.e("Could not delete entry (key: " + key + ")", e);
+ return false;
}
return true;
}
@Override
+ public void tetherOffloadRuleForEach(boolean downstream,
+ @NonNull BiConsumer<Tether4Key, Tether4Value> action) {
+ if (!isInitialized()) return;
+
+ try {
+ if (downstream) {
+ mBpfDownstream4Map.forEach(action);
+ } else {
+ mBpfUpstream4Map.forEach(action);
+ }
+ } catch (ErrnoException e) {
+ mLog.e("Could not iterate map: ", e);
+ }
+ }
+
+ @Override
public boolean attachProgram(String iface, boolean downstream) {
if (!isInitialized()) return false;
diff --git a/Tethering/apishim/common/com/android/networkstack/tethering/apishim/common/BpfCoordinatorShim.java b/Tethering/apishim/common/com/android/networkstack/tethering/apishim/common/BpfCoordinatorShim.java
index 8a7a49c..08ab9ca 100644
--- a/Tethering/apishim/common/com/android/networkstack/tethering/apishim/common/BpfCoordinatorShim.java
+++ b/Tethering/apishim/common/com/android/networkstack/tethering/apishim/common/BpfCoordinatorShim.java
@@ -28,6 +28,8 @@
import com.android.networkstack.tethering.Tether4Value;
import com.android.networkstack.tethering.TetherStatsValue;
+import java.util.function.BiConsumer;
+
/**
* Bpf coordinator class for API shims.
*/
@@ -145,10 +147,25 @@
/**
* Deletes a tethering IPv4 offload rule from the appropriate BPF map.
+ *
+ * @param downstream true if downstream, false if upstream.
+ * @param key the key to delete.
+ * @return true iff the map was modified, false if the key did not exist or there was an error.
*/
public abstract boolean tetherOffloadRuleRemove(boolean downstream, @NonNull Tether4Key key);
/**
+     * Iterates through the map and applies the given BiConsumer to each key -> value entry.
+ *
+ * @param downstream true if downstream, false if upstream.
+     * @param action the action to apply to each key -> value entry. Deleting entries from
+     *               within the action is not allowed; use #tetherOffloadRuleRemove instead.
+ */
+ public abstract void tetherOffloadRuleForEach(boolean downstream,
+ @NonNull BiConsumer<Tether4Key, Tether4Value> action);
+
+ /**
* Whether there is currently any IPv4 rule on the specified upstream.
*/
public abstract boolean isAnyIpv4RuleOnUpstream(int ifIndex);
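For illustration only (not part of this change): a caller honoring the no-deletion-during-iteration contract above would collect matching keys during the read-only pass and remove them afterwards via tetherOffloadRuleRemove, which is the pattern BpfCoordinator#tetherOffloadRuleClear uses later in this patch. A minimal sketch, assuming a BpfCoordinatorShim instance named "shim" and the Tether4Key/Tether4Value types from this change; the helper name and parameter are hypothetical:

    import android.util.ArraySet;
    import java.util.Set;

    // Collects the downstream rules whose ingress interface matches the given upstream index,
    // then removes them outside of the iteration, as the tetherOffloadRuleForEach contract
    // requires. "shim" and "upstreamIfindex" are assumed to be provided by the caller.
    void removeDownstreamRulesForUpstream(BpfCoordinatorShim shim, int upstreamIfindex) {
        final Set<Tether4Key> toDelete = new ArraySet<>();
        shim.tetherOffloadRuleForEach(true /* downstream */, (k, v) -> {
            // Read-only pass: no deletion is allowed while iterating.
            if (k.iif == upstreamIfindex) toDelete.add(k);
        });
        // Mutation pass: remove the collected keys via the dedicated API.
        for (final Tether4Key k : toDelete) {
            shim.tetherOffloadRuleRemove(true /* downstream */, k);
        }
    }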
diff --git a/Tethering/src/android/net/ip/IpServer.java b/Tethering/src/android/net/ip/IpServer.java
index 3428c1d..822bdf6 100644
--- a/Tethering/src/android/net/ip/IpServer.java
+++ b/Tethering/src/android/net/ip/IpServer.java
@@ -596,6 +596,7 @@
// into calls to InterfaceController, shared with startIPv4().
mInterfaceCtrl.clearIPv4Address();
mPrivateAddressCoordinator.releaseDownstream(this);
+ mBpfCoordinator.tetherOffloadClientClear(this);
mIpv4Address = null;
mStaticIpv4ServerAddr = null;
mStaticIpv4ClientAddr = null;
@@ -949,7 +950,6 @@
if (e.isValid()) {
mBpfCoordinator.tetherOffloadClientAdd(this, clientInfo);
} else {
- // TODO: Delete all related offload rules which are using this client.
mBpfCoordinator.tetherOffloadClientRemove(this, clientInfo);
}
}
@@ -1283,6 +1283,16 @@
super.exit();
}
+        // Note that IPv4 offload rule cleanup is implemented in BpfCoordinator when the
+        // upstream state is null or changed, because IPv4 and IPv6 tethering have different
+        // code flows and behaviour. When the upstream switches from an interface that supports
+        // offload to one that does not, the CMD_TETHER_CONNECTION_CHANGED event calls
+        // #cleanupUpstreamInterface but not #cleanupUpstream, because the new UpstreamIfaceSet
+        // is not null. This case cannot happen in IPv6 tethering because the IPv6 tethering
+        // upstream state is reported by IPv6TetheringCoordinator. #cleanupUpstream is also
+        // called when unwinding a failure to add NAT; in that case, the IPv4 offload rules are
+        // removed by #stopIPv4 in the state machine. If any case turns up that is not covered
+        // by the above, consider clearing the rules in #cleanupUpstream as well.
private void cleanupUpstream() {
if (mUpstreamIfaceSet == null) return;
diff --git a/Tethering/src/com/android/networkstack/tethering/BpfCoordinator.java b/Tethering/src/com/android/networkstack/tethering/BpfCoordinator.java
index 4a05c9f..9b95dac 100644
--- a/Tethering/src/com/android/networkstack/tethering/BpfCoordinator.java
+++ b/Tethering/src/com/android/networkstack/tethering/BpfCoordinator.java
@@ -34,7 +34,6 @@
import android.app.usage.NetworkStatsManager;
import android.net.INetd;
-import android.net.LinkProperties;
import android.net.MacAddress;
import android.net.NetworkStats;
import android.net.NetworkStats.Entry;
@@ -42,7 +41,9 @@
import android.net.ip.ConntrackMonitor;
import android.net.ip.ConntrackMonitor.ConntrackEventConsumer;
import android.net.ip.IpServer;
+import android.net.netlink.ConntrackMessage;
import android.net.netlink.NetlinkConstants;
+import android.net.netlink.NetlinkSocket;
import android.net.netstats.provider.NetworkStatsProvider;
import android.net.util.InterfaceParams;
import android.net.util.SharedLog;
@@ -50,7 +51,9 @@
import android.os.Handler;
import android.os.SystemClock;
import android.system.ErrnoException;
+import android.system.OsConstants;
import android.text.TextUtils;
+import android.util.ArraySet;
import android.util.Log;
import android.util.SparseArray;
@@ -69,6 +72,7 @@
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
@@ -120,6 +124,13 @@
}
@VisibleForTesting
+ static final int POLLING_CONNTRACK_TIMEOUT_MS = 60_000;
+ @VisibleForTesting
+ static final int NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED = 432000;
+ @VisibleForTesting
+ static final int NF_CONNTRACK_UDP_TIMEOUT_STREAM = 180;
+
+ @VisibleForTesting
enum StatsType {
STATS_PER_IFACE,
STATS_PER_UID,
@@ -228,12 +239,22 @@
// BpfCoordinatorTest needs predictable iteration order.
private final Set<Integer> mDeviceMapSet = new LinkedHashSet<>();
+    // Tracks the last IPv4 upstream index. Only a single upstream is supported.
+ // TODO: Support multi-upstream interfaces.
+ private int mLastIPv4UpstreamIfindex = 0;
+
// Runnable that used by scheduling next polling of stats.
- private final Runnable mScheduledPollingTask = () -> {
+ private final Runnable mScheduledPollingStats = () -> {
updateForwardedStats();
maybeSchedulePollingStats();
};
+    // Runnable used to schedule the next polling of conntrack timeout.
+ private final Runnable mScheduledPollingConntrackTimeout = () -> {
+ maybeRefreshConntrackTimeout();
+ maybeSchedulePollingConntrackTimeout();
+ };
+
// TODO: add BpfMap<TetherDownstream64Key, TetherDownstream64Value> retrieving function.
@VisibleForTesting
public abstract static class Dependencies {
@@ -263,13 +284,19 @@
}
/**
+     * Returns an estimate of elapsed time since boot, in nanoseconds.
+ */
+ public long elapsedRealtimeNanos() {
+ return SystemClock.elapsedRealtimeNanos();
+ }
+
+ /**
* Check OS Build at least S.
*
* TODO: move to BpfCoordinatorShim once the test doesn't need the mocked OS build for
* testing different code flows concurrently.
*/
public boolean isAtLeastS() {
- // TODO: consider using ShimUtils.isAtLeastS.
return SdkLevel.isAtLeastS();
}
@@ -407,6 +434,7 @@
mPollingStarted = true;
maybeSchedulePollingStats();
+ maybeSchedulePollingConntrackTimeout();
mLog.i("Polling started");
}
@@ -422,9 +450,13 @@
public void stopPolling() {
if (!mPollingStarted) return;
- // Stop scheduled polling tasks and poll the latest stats from BPF maps.
- if (mHandler.hasCallbacks(mScheduledPollingTask)) {
- mHandler.removeCallbacks(mScheduledPollingTask);
+ // Stop scheduled polling conntrack timeout.
+ if (mHandler.hasCallbacks(mScheduledPollingConntrackTimeout)) {
+ mHandler.removeCallbacks(mScheduledPollingConntrackTimeout);
+ }
+ // Stop scheduled polling stats and poll the latest stats from BPF maps.
+ if (mHandler.hasCallbacks(mScheduledPollingStats)) {
+ mHandler.removeCallbacks(mScheduledPollingStats);
}
updateForwardedStats();
mPollingStarted = false;
@@ -576,6 +608,7 @@
/**
* Clear all forwarding rules for a given downstream.
* Note that this can be only called on handler thread.
+     * TODO: rename to tetherOffloadRuleClear6 because it only clears IPv6 rules.
*/
public void tetherOffloadRuleClear(@NonNull final IpServer ipServer) {
if (!isUsingBpf()) return;
@@ -647,6 +680,7 @@
/**
* Add downstream client.
+ * Note that this can be only called on handler thread.
*/
public void tetherOffloadClientAdd(@NonNull final IpServer ipServer,
@NonNull final ClientInfo client) {
@@ -661,54 +695,180 @@
}
/**
- * Remove downstream client.
+ * Remove a downstream client and its rules if any.
+ * Note that this can be only called on handler thread.
*/
public void tetherOffloadClientRemove(@NonNull final IpServer ipServer,
@NonNull final ClientInfo client) {
if (!isUsingBpf()) return;
+ // No clients on the downstream, return early.
HashMap<Inet4Address, ClientInfo> clients = mTetherClients.get(ipServer);
if (clients == null) return;
- // If no rule is removed, return early. Avoid unnecessary work on a non-existent rule
- // which may have never been added or removed already.
+ // No client is removed, return early.
if (clients.remove(client.clientAddress) == null) return;
- // Remove the downstream entry if it has no more rule.
+ // Remove the client's rules. Removing the client implies that its rules are not used
+ // anymore.
+ tetherOffloadRuleClear(client);
+
+ // Remove the downstream entry if it has no more client.
if (clients.isEmpty()) {
mTetherClients.remove(ipServer);
}
}
/**
- * Call when UpstreamNetworkState may be changed.
- * If upstream has ipv4 for tethering, update this new UpstreamNetworkState to map. The
- * upstream interface index and its address mapping is prepared for building IPv4
- * offload rule.
- *
- * TODO: Delete the unused upstream interface mapping.
- * TODO: Support ether ip upstream interface.
+ * Clear all downstream clients and their rules if any.
+ * Note that this can be only called on handler thread.
*/
- public void addUpstreamIfindexToMap(LinkProperties lp) {
- if (!mPollingStarted) return;
+ public void tetherOffloadClientClear(@NonNull final IpServer ipServer) {
+ if (!isUsingBpf()) return;
+
+ final HashMap<Inet4Address, ClientInfo> clients = mTetherClients.get(ipServer);
+ if (clients == null) return;
+
+        // Copy the client list because the client map may be modified during the iteration.
+ for (final ClientInfo c : new ArrayList<ClientInfo>(clients.values())) {
+ tetherOffloadClientRemove(ipServer, c);
+ }
+ }
+
+ /**
+ * Clear all forwarding IPv4 rules for a given client.
+ * Note that this can be only called on handler thread.
+ */
+ private void tetherOffloadRuleClear(@NonNull final ClientInfo clientInfo) {
+        // TODO: consider removing the rules directly in #tetherOffloadRuleForEach once
+        // BpfMap#forEach can guarantee that deleting entries during the iteration still
+        // visits every remaining entry.
+ final Inet4Address clientAddr = clientInfo.clientAddress;
+ final Set<Integer> upstreamIndiceSet = new ArraySet<Integer>();
+ final Set<Tether4Key> deleteUpstreamRuleKeys = new ArraySet<Tether4Key>();
+ final Set<Tether4Key> deleteDownstreamRuleKeys = new ArraySet<Tether4Key>();
+
+ // Find the rules which are related with the given client.
+ mBpfCoordinatorShim.tetherOffloadRuleForEach(UPSTREAM, (k, v) -> {
+ if (Arrays.equals(k.src4, clientAddr.getAddress())) {
+ deleteUpstreamRuleKeys.add(k);
+ }
+ });
+ mBpfCoordinatorShim.tetherOffloadRuleForEach(DOWNSTREAM, (k, v) -> {
+ if (Arrays.equals(v.dst46, toIpv4MappedAddressBytes(clientAddr))) {
+ deleteDownstreamRuleKeys.add(k);
+ upstreamIndiceSet.add((int) k.iif);
+ }
+ });
+
+        // The rules should be paired in the upstream and downstream maps because they are added
+        // by conntrack events, which carry bidirectional information.
+        // TODO: Consider figuring out a way to recover. Probably delete all rules as a fallback.
+ if (deleteUpstreamRuleKeys.size() != deleteDownstreamRuleKeys.size()) {
+            Log.wtf(TAG, "The number of rules to delete differs on upstream4 and downstream4 ("
+ + "upstream: " + deleteUpstreamRuleKeys.size() + ", "
+ + "downstream: " + deleteDownstreamRuleKeys.size() + ").");
+ return;
+ }
+
+ // Delete the rules which are related with the given client.
+ for (final Tether4Key k : deleteUpstreamRuleKeys) {
+ mBpfCoordinatorShim.tetherOffloadRuleRemove(UPSTREAM, k);
+ }
+ for (final Tether4Key k : deleteDownstreamRuleKeys) {
+ mBpfCoordinatorShim.tetherOffloadRuleRemove(DOWNSTREAM, k);
+ }
+
+        // Clean up each upstream interface via a set, which avoids duplicated work on the same
+        // upstream interface. Cleaning up the same interface twice (or more) here may raise
+        // an exception because all related information was removed by the first cleanup.
+ for (final int upstreamIndex : upstreamIndiceSet) {
+ maybeClearLimit(upstreamIndex);
+ }
+ }
+
+ /**
+     * Clear all forwarding IPv4 rules for a given downstream. Needed because a client may still
+     * be connected on the downstream while the existing rules are no longer required (e.g. the
+     * upstream changed).
+ */
+ private void tetherOffloadRule4Clear(@NonNull final IpServer ipServer) {
+ if (!isUsingBpf()) return;
+
+ final HashMap<Inet4Address, ClientInfo> clients = mTetherClients.get(ipServer);
+ if (clients == null) return;
+
+        // Each value is unique, like its key, because the key is currently taken from the
+        // client address of the ClientInfo. See #tetherOffloadClientAdd.
+ for (final ClientInfo client : clients.values()) {
+ tetherOffloadRuleClear(client);
+ }
+ }
+
+ private boolean isValidUpstreamIpv4Address(@NonNull final InetAddress addr) {
+ if (!(addr instanceof Inet4Address)) return false;
+ Inet4Address v4 = (Inet4Address) addr;
+ if (v4.isAnyLocalAddress() || v4.isLinkLocalAddress()
+ || v4.isLoopbackAddress() || v4.isMulticastAddress()) {
+ return false;
+ }
+ return true;
+ }
+
+ /**
+     * Call when UpstreamNetworkState may have changed.
+     * If the upstream has an IPv4 address for tethering, update BpfCoordinator with the new
+     * UpstreamNetworkState to build the upstream interface index mapping. Otherwise, clear all
+     * existing rules, if any.
+ *
+ * Note that this can be only called on handler thread.
+ */
+ public void updateUpstreamNetworkState(UpstreamNetworkState ns) {
+ if (!isUsingBpf()) return;
+
+ int upstreamIndex = 0;
// This will not work on a network that is using 464xlat because hasIpv4Address will not be
// true.
// TODO: need to consider 464xlat.
- if (lp == null || !lp.hasIpv4Address()) return;
+ if (ns != null && ns.linkProperties != null && ns.linkProperties.hasIpv4Address()) {
+ // TODO: support ether ip upstream interface.
+ final InterfaceParams params = mDeps.getInterfaceParams(
+ ns.linkProperties.getInterfaceName());
+ if (params != null && !params.hasMacAddress /* raw ip upstream only */) {
+ upstreamIndex = params.index;
+ }
+ }
+ if (mLastIPv4UpstreamIfindex == upstreamIndex) return;
- // Support raw ip upstream interface only.
- final InterfaceParams params = mDeps.getInterfaceParams(lp.getInterfaceName());
- if (params == null || params.hasMacAddress) return;
+        // Clear the existing rules if the upstream interface has changed. The existing rules
+        // must be cleared before the upstream index mapping is cleared; otherwise IpServer or a
+        // conntrack event could use a no-longer-existing upstream interface index to build a
+        // removal key while the rules are being removed. We can't notify each IpServer to clear
+        // its rules, as IPv6TetheringCoordinator#updateUpstreamNetworkState does, because the
+        // IpServer may not handle the upstream change notification before the upstream index
+        // mapping changes.
+ if (mLastIPv4UpstreamIfindex != 0) {
+ // Clear all forwarding IPv4 rules for all downstreams.
+ for (final IpServer ipserver : mTetherClients.keySet()) {
+ tetherOffloadRule4Clear(ipserver);
+ }
+ }
- Collection<InetAddress> addresses = lp.getAddresses();
- for (InetAddress addr: addresses) {
- if (addr instanceof Inet4Address) {
- Inet4Address i4addr = (Inet4Address) addr;
- if (!i4addr.isAnyLocalAddress() && !i4addr.isLinkLocalAddress()
- && !i4addr.isLoopbackAddress() && !i4addr.isMulticastAddress()) {
- mIpv4UpstreamIndices.put(i4addr, params.index);
- }
+        // Don't update mLastIPv4UpstreamIfindex before clearing any existing rules; it is needed
+        // to tell whether the out-of-date rules must be cleaned up.
+ mLastIPv4UpstreamIfindex = upstreamIndex;
+
+ // If link properties are valid, build the upstream information mapping. Otherwise, clear
+ // the upstream interface index mapping, to ensure that any conntrack events that arrive
+ // after the upstream is lost do not incorrectly add rules pointing at the upstream.
+ if (upstreamIndex == 0) {
+ mIpv4UpstreamIndices.clear();
+ return;
+ }
+ Collection<InetAddress> addresses = ns.linkProperties.getAddresses();
+ for (final InetAddress addr: addresses) {
+ if (isValidUpstreamIpv4Address(addr)) {
+ mIpv4UpstreamIndices.put((Inet4Address) addr, upstreamIndex);
}
}
}
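Aside, for illustration only (not part of the patch): the isValidUpstreamIpv4Address check above accepts ordinary unicast IPv4 upstream addresses and rejects the unspecified, link-local, loopback and multicast ones. A standalone sketch of the same checks using only java.net; the class name and sample addresses are hypothetical:

    import java.net.Inet4Address;
    import java.net.InetAddress;

    public final class UpstreamAddressFilterDemo {
        // Mirrors the checks of isValidUpstreamIpv4Address.
        static boolean isUsableUpstreamIpv4(InetAddress addr) {
            if (!(addr instanceof Inet4Address)) return false;
            final Inet4Address v4 = (Inet4Address) addr;
            return !v4.isAnyLocalAddress() && !v4.isLinkLocalAddress()
                    && !v4.isLoopbackAddress() && !v4.isMulticastAddress();
        }

        public static void main(String[] args) throws Exception {
            System.out.println(isUsableUpstreamIpv4(InetAddress.getByName("100.64.1.2")));   // true
            System.out.println(isUsableUpstreamIpv4(InetAddress.getByName("169.254.0.1")));  // false (link-local)
            System.out.println(isUsableUpstreamIpv4(InetAddress.getByName("127.0.0.1")));    // false (loopback)
            System.out.println(isUsableUpstreamIpv4(InetAddress.getByName("224.0.0.1")));    // false (multicast)
        }
    }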
@@ -793,6 +953,24 @@
dumpDevmap(pw);
pw.decreaseIndent();
+ pw.println("Client Information:");
+ pw.increaseIndent();
+ if (mTetherClients.isEmpty()) {
+ pw.println("<empty>");
+ } else {
+ pw.println(mTetherClients.toString());
+ }
+ pw.decreaseIndent();
+
+ pw.println("IPv4 Upstream Indices:");
+ pw.increaseIndent();
+ if (mIpv4UpstreamIndices.isEmpty()) {
+ pw.println("<empty>");
+ } else {
+ pw.println(mIpv4UpstreamIndices.toString());
+ }
+ pw.decreaseIndent();
+
pw.println();
pw.println("Forwarding counters:");
pw.increaseIndent();
@@ -971,14 +1149,14 @@
return;
}
if (map.isEmpty()) {
- pw.println("No interface index");
+ pw.println("<empty>");
return;
}
pw.println("ifindex (iface) -> ifindex (iface)");
pw.increaseIndent();
map.forEach((k, v) -> {
// Only get upstream interface name. Just do the best to make the index readable.
- // TODO: get downstream interface name because the index is either upstrema or
+ // TODO: get downstream interface name because the index is either upstream or
// downstream interface in dev map.
pw.println(String.format("%d (%s) -> %d (%s)", k.ifIndex, getIfName(k.ifIndex),
v.ifIndex, getIfName(v.ifIndex)));
@@ -1248,12 +1426,99 @@
return null;
}
- // Support raw ip only.
- // TODO: add ether ip support.
+ @NonNull
+ private byte[] toIpv4MappedAddressBytes(Inet4Address ia4) {
+ final byte[] addr4 = ia4.getAddress();
+ final byte[] addr6 = new byte[16];
+ addr6[10] = (byte) 0xff;
+ addr6[11] = (byte) 0xff;
+ addr6[12] = addr4[0];
+ addr6[13] = addr4[1];
+ addr6[14] = addr4[2];
+ addr6[15] = addr4[3];
+ return addr6;
+ }
+
+ @Nullable
+ private Inet4Address ipv4MappedAddressBytesToIpv4Address(final byte[] addr46) {
+ if (addr46.length != 16) return null;
+ if (addr46[0] != 0 || addr46[1] != 0 || addr46[2] != 0 || addr46[3] != 0
+ || addr46[4] != 0 || addr46[5] != 0 || addr46[6] != 0 || addr46[7] != 0
+                || addr46[8] != 0 || addr46[9] != 0 || (addr46[10] & 0xff) != 0xff
+ || (addr46[11] & 0xff) != 0xff) {
+ return null;
+ }
+
+ final byte[] addr4 = new byte[4];
+ addr4[0] = addr46[12];
+ addr4[1] = addr46[13];
+ addr4[2] = addr46[14];
+ addr4[3] = addr46[15];
+
+ return parseIPv4Address(addr4);
+ }
+
// TODO: parse CTA_PROTOINFO of conntrack event in ConntrackMonitor. For TCP, only add rules
// while TCP status is established.
@VisibleForTesting
class BpfConntrackEventConsumer implements ConntrackEventConsumer {
+        // The upstream4 and downstream4 rules are built as the following tables. Only a raw ip
+        // upstream interface is supported. Note that the "lastUsed" field is only updated by the
+        // BPF program, which records the last time a given rule was used.
+ // TODO: support ether ip upstream interface.
+ //
+ // NAT network topology:
+ //
+        //       public network (rawip)             private network
+        //                  |                UE             |
+        //  +------------+  V  +------------+------------+  V  +------------+
+        //  |   Server   +-----+  Upstream  | Downstream +-----+   Client   |
+        //  +------------+     +------------+------------+     +------------+
+ //
+ // upstream4 key and value:
+ //
+ // +------+------------------------------------------------+
+ // | | TetherUpstream4Key |
+ // +------+------+------+------+------+------+------+------+
+ // |field |iif |dstMac|l4prot|src4 |dst4 |srcPor|dstPor|
+ // | | | |o | | |t |t |
+ // +------+------+------+------+------+------+------+------+
+ // |value |downst|downst|tcp/ |client|server|client|server|
+ // | |ream |ream |udp | | | | |
+ // +------+------+------+------+------+------+------+------+
+ //
+ // +------+---------------------------------------------------------------------+
+ // | | TetherUpstream4Value |
+ // +------+------+------+------+------+------+------+------+------+------+------+
+ // |field |oif |ethDst|ethSrc|ethPro|pmtu |src46 |dst46 |srcPor|dstPor|lastUs|
+ // | | |mac |mac |to | | | |t |t |ed |
+ // +------+------+------+------+------+------+------+------+------+------+------+
+ // |value |upstre|-- |-- |ETH_P_|1500 |upstre|server|upstre|server|-- |
+ // | |am | | |IP | |am | |am | | |
+ // +------+------+------+------+------+------+------+------+------+------+------+
+ //
+ // downstream4 key and value:
+ //
+ // +------+------------------------------------------------+
+ // | | TetherDownstream4Key |
+ // +------+------+------+------+------+------+------+------+
+ // |field |iif |dstMac|l4prot|src4 |dst4 |srcPor|dstPor|
+ // | | | |o | | |t |t |
+ // +------+------+------+------+------+------+------+------+
+ // |value |upstre|-- |tcp/ |server|upstre|server|upstre|
+ // | |am | |udp | |am | |am |
+ // +------+------+------+------+------+------+------+------+
+ //
+ // +------+---------------------------------------------------------------------+
+ // | | TetherDownstream4Value |
+ // +------+------+------+------+------+------+------+------+------+------+------+
+ // |field |oif |ethDst|ethSrc|ethPro|pmtu |src46 |dst46 |srcPor|dstPor|lastUs|
+ // | | |mac |mac |to | | | |t |t |ed |
+ // +------+------+------+------+------+------+------+------+------+------+------+
+ // |value |downst|client|downst|ETH_P_|1500 |server|client|server|client|-- |
+ // | |ream | |ream |IP | | | | | | |
+ // +------+------+------+------+------+------+------+------+------+------+------+
+ //
@NonNull
private Tether4Key makeTetherUpstream4Key(
@NonNull ConntrackEvent e, @NonNull ClientInfo c) {
@@ -1292,19 +1557,6 @@
0 /* lastUsed, filled by bpf prog only */);
}
- @NonNull
- private byte[] toIpv4MappedAddressBytes(Inet4Address ia4) {
- final byte[] addr4 = ia4.getAddress();
- final byte[] addr6 = new byte[16];
- addr6[10] = (byte) 0xff;
- addr6[11] = (byte) 0xff;
- addr6[12] = addr4[0];
- addr6[13] = addr4[1];
- addr6[14] = addr4[2];
- addr6[15] = addr4[3];
- return addr6;
- }
-
public void accept(ConntrackEvent e) {
final ClientInfo tetherClient = getClientInfo(e.tupleOrig.srcIp);
if (tetherClient == null) return;
@@ -1318,8 +1570,23 @@
if (e.msgType == (NetlinkConstants.NFNL_SUBSYS_CTNETLINK << 8
| NetlinkConstants.IPCTNL_MSG_CT_DELETE)) {
- mBpfCoordinatorShim.tetherOffloadRuleRemove(UPSTREAM, upstream4Key);
- mBpfCoordinatorShim.tetherOffloadRuleRemove(DOWNSTREAM, downstream4Key);
+ final boolean deletedUpstream = mBpfCoordinatorShim.tetherOffloadRuleRemove(
+ UPSTREAM, upstream4Key);
+ final boolean deletedDownstream = mBpfCoordinatorShim.tetherOffloadRuleRemove(
+ DOWNSTREAM, downstream4Key);
+
+ if (!deletedUpstream && !deletedDownstream) {
+                // The rules may already have been removed by a lost client or a lost upstream.
+ return;
+ }
+
+ if (deletedUpstream != deletedDownstream) {
+ Log.wtf(TAG, "The bidirectional rules should be removed concurrently ("
+ + "upstream: " + deletedUpstream
+ + ", downstream: " + deletedDownstream + ")");
+ return;
+ }
+
maybeClearLimit(upstreamIndex);
return;
}
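Aside, for illustration only: the src46/dst46 fields in the tables above hold IPv4 addresses in IPv4-mapped IPv6 form (::ffff:a.b.c.d), which is what toIpv4MappedAddressBytes produces and ipv4MappedAddressBytesToIpv4Address parses back. A standalone round-trip sketch; the class name and sample address are hypothetical:

    import java.net.Inet4Address;
    import java.net.InetAddress;
    import java.util.Arrays;

    public final class Ipv4MappedDemo {
        // Same layout as toIpv4MappedAddressBytes: 80 zero bits, 16 one bits, then the 4 IPv4 bytes.
        static byte[] toIpv4Mapped(Inet4Address ia4) {
            final byte[] addr6 = new byte[16];
            addr6[10] = (byte) 0xff;
            addr6[11] = (byte) 0xff;
            System.arraycopy(ia4.getAddress(), 0, addr6, 12, 4);
            return addr6;
        }

        public static void main(String[] args) throws Exception {
            final Inet4Address client = (Inet4Address) InetAddress.getByName("192.168.80.12");
            final byte[] mapped = toIpv4Mapped(client);
            final StringBuilder hex = new StringBuilder();
            for (byte b : mapped) hex.append(String.format("%02x", b));
            System.out.println(hex);  // 00000000000000000000ffffc0a8500c
            // Recover the embedded IPv4 address, as ipv4MappedAddressBytesToIpv4Address does
            // after validating that bytes 0-9 are zero and bytes 10-11 are 0xff.
            final byte[] addr4 = Arrays.copyOfRange(mapped, 12, 16);
            System.out.println(InetAddress.getByAddress(addr4));  // /192.168.80.12
        }
    }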
@@ -1585,14 +1852,89 @@
return Math.max(DEFAULT_TETHER_OFFLOAD_POLL_INTERVAL_MS, configInterval);
}
+ @Nullable
+ private Inet4Address parseIPv4Address(byte[] addrBytes) {
+ try {
+ final InetAddress ia = Inet4Address.getByAddress(addrBytes);
+ if (ia instanceof Inet4Address) return (Inet4Address) ia;
+ } catch (UnknownHostException | IllegalArgumentException e) {
+ mLog.e("Failed to parse IPv4 address: " + e);
+ }
+ return null;
+ }
+
+    // Update the CTA_TUPLE_ORIG timeout for a given conntrack entry. Note that a conntrack
+    // event will also arrive to report the updated timeout.
+ private void updateConntrackTimeout(byte proto, Inet4Address src4, short srcPort,
+ Inet4Address dst4, short dstPort) {
+ if (src4 == null || dst4 == null) return;
+
+ // TODO: consider acquiring the timeout setting from nf_conntrack_* variables.
+ // - proc/sys/net/netfilter/nf_conntrack_tcp_timeout_established
+ // - proc/sys/net/netfilter/nf_conntrack_udp_timeout_stream
+ // See kernel document nf_conntrack-sysctl.txt.
+ final int timeoutSec = (proto == OsConstants.IPPROTO_TCP)
+ ? NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED
+ : NF_CONNTRACK_UDP_TIMEOUT_STREAM;
+ final byte[] msg = ConntrackMessage.newIPv4TimeoutUpdateRequest(
+ proto, src4, (int) srcPort, dst4, (int) dstPort, timeoutSec);
+ try {
+ NetlinkSocket.sendOneShotKernelMessage(OsConstants.NETLINK_NETFILTER, msg);
+ } catch (ErrnoException e) {
+ mLog.e("Error updating conntrack entry ("
+ + "proto: " + proto + ", "
+ + "src4: " + src4 + ", "
+ + "srcPort: " + Short.toUnsignedInt(srcPort) + ", "
+ + "dst4: " + dst4 + ", "
+ + "dstPort: " + Short.toUnsignedInt(dstPort) + "), "
+ + "msg: " + NetlinkConstants.hexify(msg) + ", "
+ + "e: " + e);
+ }
+ }
+
+ private void maybeRefreshConntrackTimeout() {
+ final long now = mDeps.elapsedRealtimeNanos();
+
+        // Reverse the source and destination {address, port} from the downstream value because
+        // #updateConntrackTimeout refreshes the timeout of the netlink attribute CTA_TUPLE_ORIG,
+        // which is the opposite direction of the downstream map value.
+ mBpfCoordinatorShim.tetherOffloadRuleForEach(DOWNSTREAM, (k, v) -> {
+ if ((now - v.lastUsed) / 1_000_000 < POLLING_CONNTRACK_TIMEOUT_MS) {
+ updateConntrackTimeout((byte) k.l4proto,
+ ipv4MappedAddressBytesToIpv4Address(v.dst46), (short) v.dstPort,
+ ipv4MappedAddressBytesToIpv4Address(v.src46), (short) v.srcPort);
+ }
+ });
+
+        // TODO: Consider ignoring TCP traffic on the upstream and monitoring the downstream
+        // only, because TCP traffic is bidirectional. The timeout probably doesn't need to be
+        // extended in both directions for TCP.
+ mBpfCoordinatorShim.tetherOffloadRuleForEach(UPSTREAM, (k, v) -> {
+ if ((now - v.lastUsed) / 1_000_000 < POLLING_CONNTRACK_TIMEOUT_MS) {
+ updateConntrackTimeout((byte) k.l4proto, parseIPv4Address(k.src4),
+ (short) k.srcPort, parseIPv4Address(k.dst4), (short) k.dstPort);
+ }
+ });
+ }
+
private void maybeSchedulePollingStats() {
if (!mPollingStarted) return;
- if (mHandler.hasCallbacks(mScheduledPollingTask)) {
- mHandler.removeCallbacks(mScheduledPollingTask);
+ if (mHandler.hasCallbacks(mScheduledPollingStats)) {
+ mHandler.removeCallbacks(mScheduledPollingStats);
}
- mHandler.postDelayed(mScheduledPollingTask, getPollingInterval());
+ mHandler.postDelayed(mScheduledPollingStats, getPollingInterval());
+ }
+
+ private void maybeSchedulePollingConntrackTimeout() {
+ if (!mPollingStarted) return;
+
+ if (mHandler.hasCallbacks(mScheduledPollingConntrackTimeout)) {
+ mHandler.removeCallbacks(mScheduledPollingConntrackTimeout);
+ }
+
+ mHandler.postDelayed(mScheduledPollingConntrackTimeout, POLLING_CONNTRACK_TIMEOUT_MS);
}
// Return forwarding rule map. This is used for testing only.
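Aside, for illustration only: maybeRefreshConntrackTimeout above only refreshes entries whose BPF-recorded lastUsed timestamp (nanoseconds, same clock as elapsedRealtimeNanos) falls within the last POLLING_CONNTRACK_TIMEOUT_MS (60 seconds); fresh TCP entries are bumped to NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED (432000s) and UDP entries to NF_CONNTRACK_UDP_TIMEOUT_STREAM (180s). A standalone sketch of the freshness arithmetic, with hypothetical sample values:

    public final class ConntrackRefreshDemo {
        static final int POLLING_CONNTRACK_TIMEOUT_MS = 60_000;

        // Same comparison as maybeRefreshConntrackTimeout: convert the nanosecond delta between
        // "now" and the rule's last used time into milliseconds before comparing.
        static boolean shouldRefresh(long nowNs, long lastUsedNs) {
            return (nowNs - lastUsedNs) / 1_000_000 < POLLING_CONNTRACK_TIMEOUT_MS;
        }

        public static void main(String[] args) {
            final long lastUsedNs = 0;  // The unit test fixes the rules' last used time to 0.
            System.out.println(shouldRefresh(59_999L * 1_000_000, lastUsedNs));  // true: refresh
            System.out.println(shouldRefresh(60_001L * 1_000_000, lastUsedNs));  // false: skip
        }
    }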
diff --git a/Tethering/src/com/android/networkstack/tethering/Tethering.java b/Tethering/src/com/android/networkstack/tethering/Tethering.java
index b52ec86..a381621 100644
--- a/Tethering/src/com/android/networkstack/tethering/Tethering.java
+++ b/Tethering/src/com/android/networkstack/tethering/Tethering.java
@@ -1720,13 +1720,7 @@
protected void handleNewUpstreamNetworkState(UpstreamNetworkState ns) {
mIPv6TetheringCoordinator.updateUpstreamNetworkState(ns);
mOffload.updateUpstreamNetworkState(ns);
-
- // TODO: Delete all related offload rules which are using this upstream.
- if (ns != null) {
- // Add upstream index to the map. The upstream interface index is required while
- // the conntrack event builds the offload rules.
- mBpfCoordinator.addUpstreamIfindexToMap(ns.linkProperties);
- }
+ mBpfCoordinator.updateUpstreamNetworkState(ns);
}
private void handleInterfaceServingStateActive(int mode, IpServer who) {
diff --git a/Tethering/tests/unit/src/android/net/ip/IpServerTest.java b/Tethering/tests/unit/src/android/net/ip/IpServerTest.java
index ce69cb3..378a21c 100644
--- a/Tethering/tests/unit/src/android/net/ip/IpServerTest.java
+++ b/Tethering/tests/unit/src/android/net/ip/IpServerTest.java
@@ -584,6 +584,7 @@
inOrder.verify(mNetd).networkRemoveInterface(INetd.LOCAL_NET_ID, IFACE_NAME);
inOrder.verify(mNetd).interfaceSetCfg(argThat(cfg -> IFACE_NAME.equals(cfg.ifName)));
inOrder.verify(mAddressCoordinator).releaseDownstream(any());
+ inOrder.verify(mBpfCoordinator).tetherOffloadClientClear(mIpServer);
inOrder.verify(mBpfCoordinator).stopMonitoring(mIpServer);
inOrder.verify(mCallback).updateInterfaceState(
mIpServer, STATE_AVAILABLE, TETHER_ERROR_NO_ERROR);
diff --git a/Tethering/tests/unit/src/com/android/networkstack/tethering/BpfCoordinatorTest.java b/Tethering/tests/unit/src/com/android/networkstack/tethering/BpfCoordinatorTest.java
index cc912f4..914e0d4 100644
--- a/Tethering/tests/unit/src/com/android/networkstack/tethering/BpfCoordinatorTest.java
+++ b/Tethering/tests/unit/src/com/android/networkstack/tethering/BpfCoordinatorTest.java
@@ -36,9 +36,13 @@
import static android.system.OsConstants.ETH_P_IPV6;
import static android.system.OsConstants.IPPROTO_TCP;
import static android.system.OsConstants.IPPROTO_UDP;
+import static android.system.OsConstants.NETLINK_NETFILTER;
import static com.android.dx.mockito.inline.extended.ExtendedMockito.doReturn;
import static com.android.dx.mockito.inline.extended.ExtendedMockito.staticMockMarker;
+import static com.android.networkstack.tethering.BpfCoordinator.NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED;
+import static com.android.networkstack.tethering.BpfCoordinator.NF_CONNTRACK_UDP_TIMEOUT_STREAM;
+import static com.android.networkstack.tethering.BpfCoordinator.POLLING_CONNTRACK_TIMEOUT_MS;
import static com.android.networkstack.tethering.BpfCoordinator.StatsType;
import static com.android.networkstack.tethering.BpfCoordinator.StatsType.STATS_PER_IFACE;
import static com.android.networkstack.tethering.BpfCoordinator.StatsType.STATS_PER_UID;
@@ -70,13 +74,17 @@
import android.net.LinkAddress;
import android.net.LinkProperties;
import android.net.MacAddress;
+import android.net.Network;
+import android.net.NetworkCapabilities;
import android.net.NetworkStats;
import android.net.TetherOffloadRuleParcel;
import android.net.TetherStatsParcel;
import android.net.ip.ConntrackMonitor;
import android.net.ip.ConntrackMonitor.ConntrackEventConsumer;
import android.net.ip.IpServer;
+import android.net.netlink.ConntrackMessage;
import android.net.netlink.NetlinkConstants;
+import android.net.netlink.NetlinkSocket;
import android.net.util.InterfaceParams;
import android.net.util.SharedLog;
import android.os.Build;
@@ -127,6 +135,8 @@
@Rule
public final DevSdkIgnoreRule mIgnoreRule = new DevSdkIgnoreRule();
+ private static final int TEST_NET_ID = 24;
+
private static final int UPSTREAM_IFINDEX = 1001;
private static final int DOWNSTREAM_IFINDEX = 1002;
@@ -217,6 +227,7 @@
// it has to access the non-static function of BPF coordinator.
private BpfConntrackEventConsumer mConsumer;
+ private long mElapsedRealtimeNanos = 0;
private final ArgumentCaptor<ArrayList> mStringArrayCaptor =
ArgumentCaptor.forClass(ArrayList.class);
private final TestLooper mTestLooper = new TestLooper();
@@ -256,6 +267,10 @@
return mConntrackMonitor;
}
+ public long elapsedRealtimeNanos() {
+ return mElapsedRealtimeNanos;
+ }
+
@Nullable
public BpfMap<Tether4Key, Tether4Value> getBpfDownstream4Map() {
return mBpfDownstream4Map;
@@ -1340,6 +1355,11 @@
}
@NonNull
+ private Tether4Key makeDownstream4Key() {
+ return makeDownstream4Key(IPPROTO_TCP);
+ }
+
+ @NonNull
private ConntrackEvent makeTestConntrackEvent(short msgType, int proto) {
if (msgType != IPCTNL_MSG_CT_NEW && msgType != IPCTNL_MSG_CT_DELETE) {
fail("Not support message type " + msgType);
@@ -1365,7 +1385,10 @@
final LinkProperties lp = new LinkProperties();
lp.setInterfaceName(UPSTREAM_IFACE);
lp.addLinkAddress(new LinkAddress(PUBLIC_ADDR, 32 /* prefix length */));
- coordinator.addUpstreamIfindexToMap(lp);
+ final NetworkCapabilities capabilities = new NetworkCapabilities()
+ .addTransportType(NetworkCapabilities.TRANSPORT_CELLULAR);
+ coordinator.updateUpstreamNetworkState(new UpstreamNetworkState(lp, capabilities,
+ new Network(TEST_NET_ID)));
}
private void setDownstreamAndClientInformationTo(final BpfCoordinator coordinator) {
@@ -1379,8 +1402,11 @@
// was started.
coordinator.startPolling();
- // Needed because tetherOffloadRuleRemove of api31.BpfCoordinatorShimImpl only decreases
- // the count while the entry is deleted. In the other words, deleteEntry returns true.
+        // Needed for two reasons: (1) BpfConntrackEventConsumer#accept only performs cleanup
+        // when both the upstream and downstream rules are removed. (2) tetherOffloadRuleRemove
+        // of api31.BpfCoordinatorShimImpl only decreases the count when the entry is actually
+        // deleted, in other words, when deleteEntry returns true.
+ doReturn(true).when(mBpfUpstream4Map).deleteEntry(any());
doReturn(true).when(mBpfDownstream4Map).deleteEntry(any());
// Needed because BpfCoordinator#addUpstreamIfindexToMap queries interface parameter for
@@ -1494,4 +1520,104 @@
mConsumer.accept(makeTestConntrackEvent(IPCTNL_MSG_CT_NEW, IPPROTO_UDP));
verify(mBpfDevMap, never()).updateEntry(any(), any());
}
+
+ private void setElapsedRealtimeNanos(long nanoSec) {
+ mElapsedRealtimeNanos = nanoSec;
+ }
+
+ private void checkRefreshConntrackTimeout(final TestBpfMap<Tether4Key, Tether4Value> bpfMap,
+ final Tether4Key tcpKey, final Tether4Value tcpValue, final Tether4Key udpKey,
+ final Tether4Value udpValue) throws Exception {
+        // Both the system elapsed time since boot and the rule's last used time are used to
+        // measure rule expiration. In this test, the last used time of all test rules is fixed
+        // to 0. Set different testing elapsed times to make the rules either valid or expired.
+ //
+ // Timeline:
+ // 0 60 (seconds)
+ // +---+---+---+---+--...--+---+---+---+---+---+- ..
+ // | POLLING_CONNTRACK_TIMEOUT_MS |
+ // +---+---+---+---+--...--+---+---+---+---+---+- ..
+ // |<- valid diff ->|
+ // |<- expired diff ->|
+ // ^ ^ ^
+ // last used time elapsed time (valid) elapsed time (expired)
+ final long validTime = (POLLING_CONNTRACK_TIMEOUT_MS - 1) * 1_000_000L;
+ final long expiredTime = (POLLING_CONNTRACK_TIMEOUT_MS + 1) * 1_000_000L;
+
+ // Static mocking for NetlinkSocket.
+ MockitoSession mockSession = ExtendedMockito.mockitoSession()
+ .mockStatic(NetlinkSocket.class)
+ .startMocking();
+ try {
+ final BpfCoordinator coordinator = makeBpfCoordinator();
+ coordinator.startPolling();
+ bpfMap.insertEntry(tcpKey, tcpValue);
+ bpfMap.insertEntry(udpKey, udpValue);
+
+            // [1] Don't refresh conntrack timeout.
+ setElapsedRealtimeNanos(expiredTime);
+ mTestLooper.moveTimeForward(POLLING_CONNTRACK_TIMEOUT_MS);
+ waitForIdle();
+ ExtendedMockito.verifyNoMoreInteractions(staticMockMarker(NetlinkSocket.class));
+ ExtendedMockito.clearInvocations(staticMockMarker(NetlinkSocket.class));
+
+            // [2] Refresh conntrack timeout.
+ setElapsedRealtimeNanos(validTime);
+ mTestLooper.moveTimeForward(POLLING_CONNTRACK_TIMEOUT_MS);
+ waitForIdle();
+ final byte[] expectedNetlinkTcp = ConntrackMessage.newIPv4TimeoutUpdateRequest(
+ IPPROTO_TCP, PRIVATE_ADDR, (int) PRIVATE_PORT, REMOTE_ADDR,
+ (int) REMOTE_PORT, NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED);
+ final byte[] expectedNetlinkUdp = ConntrackMessage.newIPv4TimeoutUpdateRequest(
+ IPPROTO_UDP, PRIVATE_ADDR, (int) PRIVATE_PORT, REMOTE_ADDR,
+ (int) REMOTE_PORT, NF_CONNTRACK_UDP_TIMEOUT_STREAM);
+ ExtendedMockito.verify(() -> NetlinkSocket.sendOneShotKernelMessage(
+ eq(NETLINK_NETFILTER), eq(expectedNetlinkTcp)));
+ ExtendedMockito.verify(() -> NetlinkSocket.sendOneShotKernelMessage(
+ eq(NETLINK_NETFILTER), eq(expectedNetlinkUdp)));
+ ExtendedMockito.verifyNoMoreInteractions(staticMockMarker(NetlinkSocket.class));
+ ExtendedMockito.clearInvocations(staticMockMarker(NetlinkSocket.class));
+
+            // [3] Don't refresh conntrack timeout if polling stopped.
+ coordinator.stopPolling();
+ mTestLooper.moveTimeForward(POLLING_CONNTRACK_TIMEOUT_MS);
+ waitForIdle();
+ ExtendedMockito.verifyNoMoreInteractions(staticMockMarker(NetlinkSocket.class));
+ ExtendedMockito.clearInvocations(staticMockMarker(NetlinkSocket.class));
+ } finally {
+ mockSession.finishMocking();
+ }
+ }
+
+ @Test
+ @IgnoreUpTo(Build.VERSION_CODES.R)
+ public void testRefreshConntrackTimeout_Upstream4Map() throws Exception {
+ // TODO: Replace the dependencies BPF map with a non-mocked TestBpfMap object.
+ final TestBpfMap<Tether4Key, Tether4Value> bpfUpstream4Map =
+ new TestBpfMap<>(Tether4Key.class, Tether4Value.class);
+ doReturn(bpfUpstream4Map).when(mDeps).getBpfUpstream4Map();
+
+ final Tether4Key tcpKey = makeUpstream4Key(IPPROTO_TCP);
+ final Tether4Key udpKey = makeUpstream4Key(IPPROTO_UDP);
+ final Tether4Value tcpValue = makeUpstream4Value();
+ final Tether4Value udpValue = makeUpstream4Value();
+
+ checkRefreshConntrackTimeout(bpfUpstream4Map, tcpKey, tcpValue, udpKey, udpValue);
+ }
+
+ @Test
+ @IgnoreUpTo(Build.VERSION_CODES.R)
+ public void testRefreshConntrackTimeout_Downstream4Map() throws Exception {
+ // TODO: Replace the dependencies BPF map with a non-mocked TestBpfMap object.
+ final TestBpfMap<Tether4Key, Tether4Value> bpfDownstream4Map =
+ new TestBpfMap<>(Tether4Key.class, Tether4Value.class);
+ doReturn(bpfDownstream4Map).when(mDeps).getBpfDownstream4Map();
+
+ final Tether4Key tcpKey = makeDownstream4Key(IPPROTO_TCP);
+ final Tether4Key udpKey = makeDownstream4Key(IPPROTO_UDP);
+ final Tether4Value tcpValue = makeDownstream4Value();
+ final Tether4Value udpValue = makeDownstream4Value();
+
+ checkRefreshConntrackTimeout(bpfDownstream4Map, tcpKey, tcpValue, udpKey, udpValue);
+ }
}