/**
 * Copyright (c) 2022, The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <mutex>

#include <netdutils/Status.h>
#include "bpf/BpfMap.h"
#include "bpf_shared.h"

using android::bpf::BpfMap;
using android::bpf::BpfMapRO;

28namespace android {
29namespace net {
30
31class BpfHandler {
32 public:
33 BpfHandler();
34 BpfHandler(const BpfHandler&) = delete;
35 BpfHandler& operator=(const BpfHandler&) = delete;
36 netdutils::Status init(const char* cg2_path);
37 /*
38 * Tag the socket with the specified tag and uid. In the qtaguid module, the
39 * first tag request that grab the spinlock of rb_tree can update the tag
40 * information first and other request need to wait until it finish. All the
41 * tag request will be addressed in the order of they obtaining the spinlock.
42 * In the eBPF implementation, the kernel will try to update the eBPF map
43 * entry with the tag request. And the hashmap update process is protected by
44 * the spinlock initialized with the map. So the behavior of two modules
45 * should be the same. No additional lock needed.
46 */
47 int tagSocket(int sockFd, uint32_t tag, uid_t chargeUid, uid_t realUid);
48
49 /*
50 * The untag process is similar to tag socket and both old qtaguid module and
51 * new eBPF module have spinlock inside the kernel for concurrent update. No
52 * external lock is required.
53 */
54 int untagSocket(int sockFd);
55
56 private:
57 // For testing
58 BpfHandler(uint32_t perUidLimit, uint32_t totalLimit);
59
60 netdutils::Status initMaps();
61 bool hasUpdateDeviceStatsPermission(uid_t uid);
62
63 BpfMap<uint64_t, UidTagValue> mCookieTagMap;
64 BpfMap<StatsKey, StatsValue> mStatsMapA;
Maciej Żenczykowskieb9b6fa2022-06-13 17:28:41 -070065 BpfMapRO<StatsKey, StatsValue> mStatsMapB;
Maciej Żenczykowski9017a072022-06-16 14:49:27 -070066 BpfMapRO<uint32_t, uint32_t> mConfigurationMap;
Ken Chen1647f602021-10-05 21:55:22 +080067 BpfMap<uint32_t, uint8_t> mUidPermissionMap;
68
69 std::mutex mMutex;
70
71 // The limit on the number of stats entries a uid can have in the per uid stats map. BpfHandler
72 // will block that specific uid from tagging new sockets after the limit is reached.
73 const uint32_t mPerUidStatsEntriesLimit;
74
75 // The limit on the total number of stats entries in the per uid stats map. BpfHandler will
76 // block all tagging requests after the limit is reached.
77 const uint32_t mTotalUidStatsEntriesLimit;
78
79 // For testing
80 friend class BpfHandlerTest;
81};
82
83} // namespace net
84} // namespace android