/*
 * Copyright (C) 2023 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "NetworkTrace"
#define ATRACE_TAG ATRACE_TAG_NETWORK

#include "netdbpf/NetworkTracePoller.h"

#include <android-base/stringprintf.h>
#include <bpf/BpfUtils.h>
#include <cutils/trace.h>
#include <log/log.h>
#include <net/if.h>
#include <perfetto/tracing/platform.h>
#include <perfetto/tracing/tracing.h>

#include <unordered_map>
#include <unordered_set>

#include "netdbpf/BpfNetworkStats.h"

namespace android {
namespace bpf {
namespace internal {
using ::android::base::StringPrintf;
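
// Illustrative usage sketch (not part of this file's logic): the poller is
// constructed with a callback that receives each drained batch of packets.
// The exact callback type is declared in NetworkTracePoller.h and is assumed
// here to be std::function<void(const std::vector<PacketTrace>&)>.
//
//   NetworkTracePoller poller([](const std::vector<PacketTrace>& pkts) {
//     // Forward the batch to the Perfetto data source.
//   });
//   poller.Start(/*pollMs=*/100);  // reference-counted; see Start() below
//   ...
//   poller.Stop();                 // the last Stop() tears everything down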
37
void NetworkTracePoller::PollAndSchedule(perfetto::base::TaskRunner* runner,
                                         uint32_t poll_ms) {
  // Always schedule another run of ourselves to recursively poll periodically.
  // The task runner is sequential so these can't run on top of each other.
  runner->PostDelayedTask([=, this]() { PollAndSchedule(runner, poll_ms); }, poll_ms);

  ConsumeAll();
}

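// Starts tracing, reference-counted per session. The first call binds the
// configuration map, creates the ring buffer, enables tracing in BPF, and
// kicks off the polling task; subsequent calls only bump the session count.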
bool NetworkTracePoller::Start(uint32_t pollMs) {
  ALOGD("Starting datasource");

  std::scoped_lock<std::mutex> lock(mMutex);
  if (mSessionCount > 0) {
    if (mPollMs != pollMs) {
      // Nothing technical prevents mPollMs from changing, it's just unclear
      // what the right behavior is. Taking the min of active values could poll
      // too frequently giving some sessions too much data. Taking the max could
      // be too infrequent. For now, do nothing.
      ALOGI("poll_ms can't be changed while running, ignoring poll_ms=%d",
            pollMs);
    }
    mSessionCount++;
    return true;
  }

  auto status = mConfigurationMap.init(PACKET_TRACE_ENABLED_MAP_PATH);
  if (!status.ok()) {
    ALOGW("Failed to bind config map: %s", status.error().message().c_str());
    return false;
  }

  auto rb = BpfRingbuf<PacketTrace>::Create(PACKET_TRACE_RINGBUF_PATH);
  if (!rb.ok()) {
    ALOGW("Failed to create ringbuf: %s", rb.error().message().c_str());
    return false;
  }

  {
    std::scoped_lock<std::mutex> block(mBufferMutex);
    mRingBuffer = std::move(*rb);
  }

  auto res = mConfigurationMap.writeValue(0, true, BPF_ANY);
  if (!res.ok()) {
    ALOGW("Failed to enable tracing: %s", res.error().message().c_str());
    return false;
  }

  // Start a task runner to run ConsumeAll every mPollMs milliseconds.
  mTaskRunner = perfetto::Platform::GetDefaultPlatform()->CreateTaskRunner({});
  mPollMs = pollMs;
  PollAndSchedule(mTaskRunner.get(), mPollMs);

  mSessionCount++;
  return true;
}

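// Stops tracing for one session. Only the last Stop() disables tracing in
// BPF, drains the ring buffer one final time, and tears down the task runner
// and ring buffer.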
bool NetworkTracePoller::Stop() {
  ALOGD("Stopping datasource");

  std::scoped_lock<std::mutex> lock(mMutex);
  if (mSessionCount == 0) return false;  // This should never happen

  // If this isn't the last session, don't clean up yet.
  if (--mSessionCount > 0) return true;

  auto res = mConfigurationMap.writeValue(0, false, BPF_ANY);
  if (!res.ok()) {
    ALOGW("Failed to disable tracing: %s", res.error().message().c_str());
  }

  // Make sure everything in the system has actually seen the 'false' we just
  // wrote; things should now be well and truly disabled.
  synchronizeKernelRCU();

  // Drain remaining events from the ring buffer now that tracing is disabled.
  // This prevents the next trace from seeing stale events and allows writing
  // the last batch of events to Perfetto.
  ConsumeAll();

  mTaskRunner.reset();

  {
    std::scoped_lock<std::mutex> block(mBufferMutex);
    mRingBuffer.reset();
  }

  return res.ok();
}

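// Emits per-interface Rx/Tx byte counters as atrace counter tracks for every
// interface index seen in the given batch of packets.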
void NetworkTracePoller::TraceIfaces(const std::vector<PacketTrace>& packets) {
  if (packets.empty()) return;

  std::unordered_set<uint32_t> uniqueIfindex;
  for (const PacketTrace& pkt : packets) {
    uniqueIfindex.insert(pkt.ifindex);
  }

  for (uint32_t ifindex : uniqueIfindex) {
    char ifname[IF_NAMESIZE] = {};
    if (if_indextoname(ifindex, ifname) != ifname) continue;

    StatsValue stats = {};
    if (bpfGetIfIndexStats(ifindex, &stats) != 0) continue;

    std::string rxTrack = StringPrintf("%s [%d] Rx Bytes", ifname, ifindex);
    std::string txTrack = StringPrintf("%s [%d] Tx Bytes", ifname, ifindex);
    ATRACE_INT64(rxTrack.c_str(), stats.rxBytes);
    ATRACE_INT64(txTrack.c_str(), stats.txBytes);
  }
}

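// Drains all pending events from the ring buffer while holding mBufferMutex,
// then emits the interface counters and invokes the packet callback outside
// the lock. Returns false if tracing is inactive or the poll fails.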
bool NetworkTracePoller::ConsumeAll() {
  std::vector<PacketTrace> packets;
  {
    std::scoped_lock<std::mutex> lock(mBufferMutex);
    if (mRingBuffer == nullptr) {
      ALOGW("Tracing is not active");
      return false;
    }

    base::Result<int> ret = mRingBuffer->ConsumeAll(
        [&](const PacketTrace& pkt) { packets.push_back(pkt); });
    if (!ret.ok()) {
      ALOGW("Failed to poll ringbuf: %s", ret.error().message().c_str());
      return false;
    }
  }

  ATRACE_INT("NetworkTracePackets", packets.size());

  TraceIfaces(packets);
  mCallback(packets);

  return true;
}

} // namespace internal
} // namespace bpf
} // namespace android