/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/trace_processor/importers/proto/profile_packet_sequence_state.h"

#include "perfetto/base/flat_set.h"
#include "perfetto/base/logging.h"
#include "perfetto/ext/base/string_view.h"
#include "src/trace_processor/importers/common/address_range.h"
#include "src/trace_processor/importers/common/mapping_tracker.h"
#include "src/trace_processor/importers/common/process_tracker.h"
#include "src/trace_processor/importers/common/stack_profile_tracker.h"
#include "src/trace_processor/importers/proto/packet_sequence_state_generation.h"
#include "src/trace_processor/importers/proto/profile_packet_utils.h"
#include "src/trace_processor/importers/proto/stack_profile_sequence_state.h"
#include "src/trace_processor/storage/stats.h"
#include "src/trace_processor/storage/trace_storage.h"
#include "src/trace_processor/types/trace_processor_context.h"
#include "src/trace_processor/util/build_id.h"

namespace perfetto {
namespace trace_processor {
namespace {
const char kArtHeapName[] = "com.android.art";
}

ProfilePacketSequenceState::ProfilePacketSequenceState(
    TraceProcessorContext* context)
    : context_(context) {
  strings_.Insert(0, "");
}

ProfilePacketSequenceState::~ProfilePacketSequenceState() = default;

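// Tracks the sequence of ProfilePacket.index values to detect dropped
// packets: indices start at 0 and must increase by exactly 1 within a
// sequence. Any gap is recorded as a heapprofd_missing_packet stat.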
void ProfilePacketSequenceState::SetProfilePacketIndex(uint64_t index) {
  bool dropped_packet = false;
  // heapprofd starts counting at index = 0.
  if (!prev_index.has_value() && index != 0) {
    dropped_packet = true;
  }

  if (prev_index.has_value() && *prev_index + 1 != index) {
    dropped_packet = true;
  }

  if (dropped_packet) {
    context_->storage->IncrementStats(stats::heapprofd_missing_packet);
  }
  prev_index = index;
}

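// Interns a string sent in the ProfilePacket. Id 0 is reserved for the empty
// string (inserted in the constructor), so a non-empty string must never use
// id 0.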
void ProfilePacketSequenceState::AddString(SourceStringId id,
                                           base::StringView str) {
  PERFETTO_CHECK(id != 0 || str.empty());
  strings_.Insert(id, str.ToStdString());
}

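// Resolves the interned string ids referenced by |mapping| (build id and path
// components) and interns the resulting mapping with the shared
// MappingTracker. An unknown build-id string id drops the mapping; an unknown
// path-component id merely truncates the mapping name.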
void ProfilePacketSequenceState::AddMapping(SourceMappingId id,
                                            const SourceMapping& mapping) {
  CreateMappingParams params;
  if (std::string* str = strings_.Find(mapping.build_id); str) {
    params.build_id = BuildId::FromRaw(*str);
  } else {
    context_->storage->IncrementStats(stats::stackprofile_invalid_string_id);
    return;
  }
  params.exact_offset = mapping.exact_offset;
  params.start_offset = mapping.start_offset;
  params.memory_range = AddressRange(mapping.start, mapping.end);
  params.load_bias = mapping.load_bias;

  std::vector<base::StringView> path_components;
  path_components.reserve(mapping.name_ids.size());
  for (SourceStringId string_id : mapping.name_ids) {
    if (std::string* str = strings_.Find(string_id); str) {
      path_components.push_back(base::StringView(*str));
    } else {
      context_->storage->IncrementStats(stats::stackprofile_invalid_string_id);
      // For backward compatibility reasons we do not return an error but
      // instead stop adding path components.
      break;
    }
  }

  params.name = ProfilePacketUtils::MakeMappingName(path_components);
  mappings_.Insert(
      id, &context_->mapping_tracker->InternMemoryMapping(std::move(params)));
}

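// Resolves the frame's mapping and function name and interns the frame at
// |rel_pc| within that mapping, keyed by the packet-local frame id.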
void ProfilePacketSequenceState::AddFrame(SourceFrameId id,
                                          const SourceFrame& frame) {
  VirtualMemoryMapping* mapping;
  if (auto* ptr = mappings_.Find(frame.mapping_id); ptr) {
    mapping = *ptr;
  } else {
    context_->storage->IncrementStats(stats::stackprofile_invalid_mapping_id);
    return;
  }

  std::string* function_name = strings_.Find(frame.name_id);
  if (!function_name) {
    context_->storage->IncrementStats(stats::stackprofile_invalid_string_id);
    return;
  }

  FrameId frame_id =
      mapping->InternFrame(frame.rel_pc, base::StringView(*function_name));
  PERFETTO_CHECK(!mapping->is_jitted());
  frames_.Insert(id, frame_id);
}

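// Interns the callstack by interning each frame into a chain of callsites,
// starting at the root (depth 0). The CallsiteId of the leaf frame is stored
// under the packet-local callstack id; empty callstacks are rejected.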
void ProfilePacketSequenceState::AddCallstack(
    SourceCallstackId id,
    const SourceCallstack& callstack) {
  std::optional<CallsiteId> parent_callsite_id;
  uint32_t depth = 0;
  for (SourceFrameId source_frame_id : callstack) {
    FrameId* frame_id = frames_.Find(source_frame_id);
    if (!frame_id) {
      context_->storage->IncrementStats(stats::stackprofile_invalid_frame_id);
      return;
    }
    parent_callsite_id = context_->stack_profile_tracker->InternCallsite(
        parent_callsite_id, *frame_id, depth);
    ++depth;
  }

  if (!parent_callsite_id) {
    context_->storage->IncrementStats(stats::stackprofile_empty_callstack);
    return;
  }

  callstacks_.Insert(id, *parent_callsite_id);
}

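// Allocations are buffered in |pending_allocs_| rather than written
// immediately, so that they can be resolved once all interned data for the
// profile has been seen; CommitAllocations (called from FinalizeProfile)
// flushes the buffer.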
void ProfilePacketSequenceState::StoreAllocation(
    const SourceAllocation& alloc) {
  pending_allocs_.push_back(std::move(alloc));
}

void ProfilePacketSequenceState::CommitAllocations() {
  for (const SourceAllocation& alloc : pending_allocs_)
    AddAllocation(alloc);
  pending_allocs_.clear();
}

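// Marks the end of a profile dump: flushes any buffered allocations and clears
// the per-dump interning tables, since string/mapping/frame/callstack ids are
// only valid within a single dump.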
void ProfilePacketSequenceState::FinalizeProfile() {
  CommitAllocations();
  strings_.Clear();
  mappings_.Clear();
  frames_.Clear();
  callstacks_.Clear();
}

FrameId ProfilePacketSequenceState::GetDatabaseFrameIdForTesting(
    SourceFrameId source_frame_id) {
  FrameId* frame_id = frames_.Find(source_frame_id);
  if (!frame_id) {
    PERFETTO_DLOG("Invalid frame.");
    return {};
  }
  return *frame_id;
}

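// Converts the cumulative allocation/free counters reported for |alloc| into
// deltas against the last row emitted for the same (process, callsite) pair
// and inserts the deltas into the heap_profile_allocation table. Negative
// allocation deltas (or positive free deltas) indicate a malformed packet and
// cause the sample to be dropped.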
void ProfilePacketSequenceState::AddAllocation(const SourceAllocation& alloc) {
  const UniquePid upid = context_->process_tracker->GetOrCreateProcess(
      static_cast<uint32_t>(alloc.pid));
  auto opt_callstack_id = FindOrInsertCallstack(upid, alloc.callstack_id);
  if (!opt_callstack_id)
    return;

  CallsiteId callstack_id = *opt_callstack_id;

  tables::HeapProfileAllocationTable::Row alloc_row{
      alloc.timestamp,
      upid,
      alloc.heap_name,
      callstack_id,
      static_cast<int64_t>(alloc.alloc_count),
      static_cast<int64_t>(alloc.self_allocated)};

  tables::HeapProfileAllocationTable::Row free_row{
      alloc.timestamp,
      upid,
      alloc.heap_name,
      callstack_id,
      -static_cast<int64_t>(alloc.free_count),
      -static_cast<int64_t>(alloc.self_freed)};

  auto* prev_alloc = prev_alloc_.Find({upid, callstack_id});
  if (!prev_alloc) {
    prev_alloc = prev_alloc_
                     .Insert(std::make_pair(upid, callstack_id),
                             tables::HeapProfileAllocationTable::Row{})
                     .first;
  }

  auto* prev_free = prev_free_.Find({upid, callstack_id});
  if (!prev_free) {
    prev_free = prev_free_
                    .Insert(std::make_pair(upid, callstack_id),
                            tables::HeapProfileAllocationTable::Row{})
                    .first;
  }
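  // If this source callstack id maps onto a CallsiteId it has not produced
  // before for this (process, heap), record the running totals already
  // attributed to that callsite as a correction. The correction is added to
  // the reported cumulative values for this source callstack id from here on,
  // so the deltas computed below stay non-negative across the change.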
  base::FlatSet<CallsiteId>& callstacks_for_source_callstack_id =
      seen_callstacks_[SourceAllocationIndex{upid, alloc.callstack_id,
                                             alloc.heap_name}];
  bool new_callstack;
  std::tie(std::ignore, new_callstack) =
      callstacks_for_source_callstack_id.insert(callstack_id);

  if (new_callstack) {
    alloc_correction_[alloc.callstack_id] = *prev_alloc;
    free_correction_[alloc.callstack_id] = *prev_free;
  }

  const auto* alloc_correction = alloc_correction_.Find(alloc.callstack_id);
  if (alloc_correction) {
    alloc_row.count += alloc_correction->count;
    alloc_row.size += alloc_correction->size;
  }

  const auto* free_correction = free_correction_.Find(alloc.callstack_id);
  if (free_correction) {
    free_row.count += free_correction->count;
    free_row.size += free_correction->size;
  }

  tables::HeapProfileAllocationTable::Row alloc_delta = alloc_row;
  tables::HeapProfileAllocationTable::Row free_delta = free_row;

  alloc_delta.count -= prev_alloc->count;
  alloc_delta.size -= prev_alloc->size;

  free_delta.count -= prev_free->count;
  free_delta.size -= prev_free->size;

  if (alloc_delta.count < 0 || alloc_delta.size < 0 || free_delta.count > 0 ||
      free_delta.size > 0) {
    PERFETTO_DLOG("Non-monotonous allocation.");
    context_->storage->IncrementIndexedStats(stats::heapprofd_malformed_packet,
                                             static_cast<int>(upid));
    return;
  }

  // Dump at max profiles do not have .count set.
  if (alloc_delta.count || alloc_delta.size) {
    context_->storage->mutable_heap_profile_allocation_table()->Insert(
        alloc_delta);
  }

  // ART only reports allocations, and not frees. This throws off our logic
  // that assumes that if a new object was allocated with the same address,
  // the old one has to have been freed in the meantime.
  // See HeapTracker::RecordMalloc in bookkeeping.cc.
  if (context_->storage->GetString(alloc.heap_name) != kArtHeapName &&
      (free_delta.count || free_delta.size)) {
    context_->storage->mutable_heap_profile_allocation_table()->Insert(
        free_delta);
  }

  *prev_alloc = alloc_row;
  *prev_free = free_row;
}

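// Looks up a callstack interned via ProfilePacket first; if it is not found
// there, defers to the sequence's StackProfileSequenceState, which handles
// callstacks interned via InternedData.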
std::optional<CallsiteId> ProfilePacketSequenceState::FindOrInsertCallstack(
    UniquePid upid,
    uint64_t iid) {
  if (CallsiteId* id = callstacks_.Find(iid); id) {
    return *id;
  }
  return GetCustomState<StackProfileSequenceState>()->FindOrInsertCallstack(
      upid, iid);
}

}  // namespace trace_processor
}  // namespace perfetto