1 // Copyright 2017 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "components/metrics/persistent_system_profile.h"
6
7 #include <set>
8 #include <vector>
9
10 #include "base/atomicops.h"
11 #include "base/bits.h"
12 #include "base/containers/contains.h"
13 #include "base/containers/span.h"
14 #include "base/debug/crash_logging.h"
15 #include "base/memory/singleton.h"
16 #include "base/metrics/persistent_memory_allocator.h"
17 #include "base/notreached.h"
18 #include "base/pickle.h"
19 #include "components/variations/active_field_trials.h"
20
21 namespace metrics {
22
namespace {

// To provide atomic addition of records so that there is no confusion between
// writers and readers, all of the metadata about a record is contained in a
// structure that can be stored as a single atomic 32-bit word.
union RecordHeader {
  struct {
    unsigned continued : 1;  // Flag indicating if there is more after this.
    unsigned type : 7;       // The type of this record.
    unsigned amount : 24;    // The amount of data to follow.
  } as_parts;
  base::subtle::Atomic32 as_atomic;
};

// Type-id under which profile segments are allocated, made iterable, and
// later found again via Iterator::GetNextOfType().
constexpr uint32_t kTypeIdSystemProfile = 0x330A7150;  // SHA1(SystemProfile)
// Default size for each persistent segment; AddSegment() allocates at least
// this much even for small records.
constexpr size_t kSystemProfileAllocSize = 4 << 10;  // 4 KiB
// The "amount" bit-field is 24 bits wide, so one record (header + data) can
// never exceed 2^24 bytes; longer payloads are split into "continued"
// records.
constexpr size_t kMaxRecordSize = (1 << 24) - sizeof(RecordHeader);
// Writing a field-trial record whose group is this (empty) string marks the
// trial as deleted; see RemoveFieldTrial() and MergeUpdateRecords().
constexpr char kFieldTrialDeletionSentinel[] = "";

// The header is stored/loaded through its |as_atomic| view, so the union
// must be exactly one atomic word.
static_assert(sizeof(RecordHeader) == sizeof(base::subtle::Atomic32),
              "bad RecordHeader size");

// Calculate the size of a record based on the amount of data. This adds room
// for the record header and rounds up to the next multiple of the record-header
// size.
size_t CalculateRecordSize(size_t data_amount) {
  return base::bits::AlignUp(data_amount + sizeof(RecordHeader),
                             sizeof(RecordHeader));
}

}  // namespace
54
// Constructs a writable record allocator on top of |memory_allocator| and
// immediately reserves an initial segment of at least |min_size| bytes so
// later Write() calls have somewhere to store data.
PersistentSystemProfile::RecordAllocator::RecordAllocator(
    base::PersistentMemoryAllocator* memory_allocator,
    size_t min_size)
    : allocator_(memory_allocator),
      has_complete_profile_(false),
      alloc_reference_(0),
      alloc_size_(0),
      end_offset_(0) {
  AddSegment(min_size);
}
65
// Constructs a read-only record allocator. The const_cast lets one member
// type serve both read and write paths; callers in this file that use this
// constructor (HasSystemProfile, GetSystemProfile, MergeUpdateRecords)
// declare the instance const, so only const member functions are reachable.
PersistentSystemProfile::RecordAllocator::RecordAllocator(
    const base::PersistentMemoryAllocator* memory_allocator)
    : allocator_(
          const_cast<base::PersistentMemoryAllocator*>(memory_allocator)),
      alloc_reference_(0),
      alloc_size_(0),
      end_offset_(0) {}
73
// Returns the allocator to its initial empty state: every existing segment's
// first header word is zeroed (readers treat a zero header, i.e. type
// kUnusedSpace, as "no records here") and the cursor is rewound so a fresh
// profile can be written from the start.
void PersistentSystemProfile::RecordAllocator::Reset() {
  // Clear the first word of all blocks so they're known to be "empty".
  alloc_reference_ = 0;
  while (NextSegment()) {
    // Get the block as a char* and cast it. It can't be fetched directly as
    // an array of RecordHeader because that's not a fundamental type and only
    // arrays of fundamental types are allowed.
    RecordHeader* header =
        reinterpret_cast<RecordHeader*>(allocator_->GetAsArray<char>(
            alloc_reference_, kTypeIdSystemProfile, sizeof(RecordHeader)));
    DCHECK(header);
    base::subtle::NoBarrier_Store(&header->as_atomic, 0);
  }

  // Reset member variables.
  has_complete_profile_ = false;
  alloc_reference_ = 0;
  alloc_size_ = 0;
  end_offset_ = 0;
}
94
Write(RecordType type,base::StringPiece record)95 bool PersistentSystemProfile::RecordAllocator::Write(RecordType type,
96 base::StringPiece record) {
97 const char* data = record.data();
98 size_t remaining_size = record.size();
99
100 // Allocate space and write records until everything has been stored.
101 do {
102 if (end_offset_ == alloc_size_) {
103 if (!AddSegment(remaining_size))
104 return false;
105 }
106 // Write out as much of the data as possible. |data| and |remaining_size|
107 // are updated in place.
108 if (!WriteData(type, &data, &remaining_size))
109 return false;
110 } while (remaining_size > 0);
111
112 return true;
113 }
114
HasMoreData() const115 bool PersistentSystemProfile::RecordAllocator::HasMoreData() const {
116 if (alloc_reference_ == 0 && !NextSegment())
117 return false;
118
119 char* block =
120 allocator_->GetAsArray<char>(alloc_reference_, kTypeIdSystemProfile,
121 base::PersistentMemoryAllocator::kSizeAny);
122 if (!block)
123 return false;
124
125 RecordHeader header;
126 header.as_atomic = base::subtle::Acquire_Load(
127 reinterpret_cast<base::subtle::Atomic32*>(block + end_offset_));
128 return header.as_parts.type != kUnusedSpace;
129 }
130
Read(RecordType * type,std::string * record) const131 bool PersistentSystemProfile::RecordAllocator::Read(RecordType* type,
132 std::string* record) const {
133 *type = kUnusedSpace;
134 record->clear();
135
136 // Access data and read records until everything has been loaded.
137 while (true) {
138 if (end_offset_ == alloc_size_) {
139 if (!NextSegment())
140 return false;
141 }
142 if (ReadData(type, record))
143 return *type != kUnusedSpace;
144 }
145 }
146
NextSegment() const147 bool PersistentSystemProfile::RecordAllocator::NextSegment() const {
148 base::PersistentMemoryAllocator::Iterator iter(allocator_, alloc_reference_);
149 alloc_reference_ = iter.GetNextOfType(kTypeIdSystemProfile);
150 alloc_size_ = allocator_->GetAllocSize(alloc_reference_);
151 end_offset_ = 0;
152 return alloc_reference_ != 0;
153 }
154
// Makes another segment available for writing: either advances to an
// existing follow-on segment or allocates a new one big enough for at least
// |min_size| bytes of record data. Returns false only when the underlying
// persistent allocator cannot provide more space.
bool PersistentSystemProfile::RecordAllocator::AddSegment(size_t min_size) {
  if (NextSegment()) {
    // The first record-header should have been zeroed as part of the allocation
    // or by the "reset" procedure.
    DCHECK_EQ(0, base::subtle::NoBarrier_Load(
                     allocator_->GetAsArray<base::subtle::Atomic32>(
                         alloc_reference_, kTypeIdSystemProfile, 1)));
    return true;
  }

  // NextSegment() failed, which leaves the reference and offset zeroed.
  DCHECK_EQ(0U, alloc_reference_);
  DCHECK_EQ(0U, end_offset_);

  // Allocate at least the standard segment size so many small records don't
  // each cause a separate tiny allocation.
  size_t size =
      std::max(CalculateRecordSize(min_size), kSystemProfileAllocSize);

  uint32_t ref = allocator_->Allocate(size, kTypeIdSystemProfile);
  if (!ref)
    return false;  // Allocator must be full.
  // Making it iterable is what lets NextSegment() (and thus readers) find
  // this segment later.
  allocator_->MakeIterable(ref);

  alloc_reference_ = ref;
  alloc_size_ = allocator_->GetAllocSize(ref);
  return true;
}
180
// Writes one record (or the leading fragment of one) into the current
// segment. |*data| and |*data_size| are advanced past whatever was stored;
// if the fragment couldn't hold everything it is flagged "continued" and the
// caller loops to store the remainder in the next segment.
bool PersistentSystemProfile::RecordAllocator::WriteData(RecordType type,
                                                         const char** data,
                                                         size_t* data_size) {
  char* block =
      allocator_->GetAsArray<char>(alloc_reference_, kTypeIdSystemProfile,
                                   base::PersistentMemoryAllocator::kSizeAny);
  if (!block)
    return false;  // It's bad if there is no accessible block.

  // Limit the fragment to what fits in this segment after its header, and to
  // the maximum encodable in the 24-bit "amount" field.
  const size_t max_write_size = std::min(
      kMaxRecordSize, alloc_size_ - end_offset_ - sizeof(RecordHeader));
  const size_t write_size = std::min(*data_size, max_write_size);
  const size_t record_size = CalculateRecordSize(write_size);
  DCHECK_LT(write_size, record_size);

  // Write the data and the record header.
  RecordHeader header;
  header.as_atomic = 0;
  header.as_parts.type = type;
  header.as_parts.amount = write_size;
  header.as_parts.continued = (write_size < *data_size);
  size_t offset = end_offset_;
  end_offset_ += record_size;
  DCHECK_GE(alloc_size_, end_offset_);
  if (end_offset_ < alloc_size_) {
    // An empty record header has to be next before this one gets written.
    base::subtle::NoBarrier_Store(
        reinterpret_cast<base::subtle::Atomic32*>(block + end_offset_), 0);
  }
  memcpy(block + offset + sizeof(header), *data, write_size);
  // The release-store publishes the record: a reader whose acquire-load sees
  // this non-zero header is guaranteed to also see the payload bytes copied
  // just above.
  base::subtle::Release_Store(
      reinterpret_cast<base::subtle::Atomic32*>(block + offset),
      header.as_atomic);

  // Account for what was stored and prepare for follow-on records with any
  // remaining data.
  *data += write_size;
  *data_size -= write_size;

  return true;
}
222
// Reads the record fragment at the current offset, appending its payload to
// |record|. Returns true when the record is complete (or the end of data /
// a corrupt header was reached, with |*type| set to kUnusedSpace); returns
// false when this fragment is flagged "continued" (caller must keep reading)
// or on a continuation-type mismatch.
bool PersistentSystemProfile::RecordAllocator::ReadData(
    RecordType* type,
    std::string* record) const {
  DCHECK_GT(alloc_size_, end_offset_);

  char* block =
      allocator_->GetAsArray<char>(alloc_reference_, kTypeIdSystemProfile,
                                   base::PersistentMemoryAllocator::kSizeAny);
  if (!block) {
    *type = kUnusedSpace;
    return true;  // No more data.
  }

  // Get and validate the record header. The acquire-load pairs with the
  // writer's release-store, making the payload visible once a non-empty
  // header is observed.
  RecordHeader header;
  header.as_atomic = base::subtle::Acquire_Load(
      reinterpret_cast<base::subtle::Atomic32*>(block + end_offset_));
  bool continued = !!header.as_parts.continued;
  if (header.as_parts.type == kUnusedSpace) {
    *type = kUnusedSpace;
    return true;  // End of all records.
  } else if (*type == kUnusedSpace) {
    // First fragment of a record: adopt its type.
    *type = static_cast<RecordType>(header.as_parts.type);
  } else if (*type != header.as_parts.type) {
    DUMP_WILL_BE_NOTREACHED_NORETURN();  // Continuation didn't match start of
                                         // record.
    *type = kUnusedSpace;
    record->clear();
    return false;
  }
  size_t read_size = header.as_parts.amount;
  if (end_offset_ + sizeof(header) + read_size > alloc_size_) {
#if !BUILDFLAG(IS_NACL)
    // TODO(crbug/1432981): Remove these. They are used to investigate
    // unexpected failures.
    SCOPED_CRASH_KEY_NUMBER("PersistentSystemProfile", "end_offset_",
                            end_offset_);
    SCOPED_CRASH_KEY_NUMBER("PersistentSystemProfile", "read_size", read_size);
    SCOPED_CRASH_KEY_NUMBER("PersistentSystemProfile", "alloc_size_",
                            alloc_size_);
#endif  // !BUILDFLAG(IS_NACL)

    DUMP_WILL_BE_NOTREACHED_NORETURN();  // Invalid header amount.
    *type = kUnusedSpace;
    return true;  // Don't try again.
  }

  // Append the record data to the output string.
  record->append(block + end_offset_ + sizeof(header), read_size);
  end_offset_ += CalculateRecordSize(read_size);
  DCHECK_GE(alloc_size_, end_offset_);

  return !continued;
}
277
PersistentSystemProfile()278 PersistentSystemProfile::PersistentSystemProfile() {}
279
~PersistentSystemProfile()280 PersistentSystemProfile::~PersistentSystemProfile() {}
281
RegisterPersistentAllocator(base::PersistentMemoryAllocator * memory_allocator)282 void PersistentSystemProfile::RegisterPersistentAllocator(
283 base::PersistentMemoryAllocator* memory_allocator) {
284 DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
285
286 // Create and store the allocator. A |min_size| of "1" ensures that a memory
287 // block is reserved now.
288 RecordAllocator allocator(memory_allocator, 1);
289 allocators_.push_back(std::move(allocator));
290 all_have_complete_profile_ = false;
291 }
292
DeregisterPersistentAllocator(base::PersistentMemoryAllocator * memory_allocator)293 void PersistentSystemProfile::DeregisterPersistentAllocator(
294 base::PersistentMemoryAllocator* memory_allocator) {
295 DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
296
297 // This would be more efficient with a std::map but it's not expected that
298 // allocators will get deregistered with any frequency, if at all.
299 std::erase_if(allocators_, [=](RecordAllocator& records) {
300 return records.allocator() == memory_allocator;
301 });
302 }
303
SetSystemProfile(const std::string & serialized_profile,bool complete)304 void PersistentSystemProfile::SetSystemProfile(
305 const std::string& serialized_profile,
306 bool complete) {
307 DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
308
309 if (allocators_.empty() || serialized_profile.empty())
310 return;
311
312 for (auto& allocator : allocators_) {
313 // Don't overwrite a complete profile with an incomplete one.
314 if (!complete && allocator.has_complete_profile())
315 continue;
316 // System profile always starts fresh.
317 allocator.Reset();
318 // Write out the serialized profile.
319 allocator.Write(kSystemProfileProto, serialized_profile);
320 // Indicate if this is a complete profile.
321 if (complete)
322 allocator.set_complete_profile();
323 }
324
325 if (complete)
326 all_have_complete_profile_ = true;
327 }
328
SetSystemProfile(const SystemProfileProto & profile,bool complete)329 void PersistentSystemProfile::SetSystemProfile(
330 const SystemProfileProto& profile,
331 bool complete) {
332 // Avoid serialization if passed profile is not complete and all allocators
333 // already have complete ones.
334 if (!complete && all_have_complete_profile_)
335 return;
336
337 std::string serialized_profile;
338 if (!profile.SerializeToString(&serialized_profile))
339 return;
340 SetSystemProfile(serialized_profile, complete);
341 }
342
AddFieldTrial(base::StringPiece trial,base::StringPiece group)343 void PersistentSystemProfile::AddFieldTrial(base::StringPiece trial,
344 base::StringPiece group) {
345 DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
346 DCHECK(!trial.empty());
347
348 base::Pickle pickler;
349 pickler.WriteString(trial);
350 pickler.WriteString(group);
351
352 WriteToAll(kFieldTrialInfo,
353 base::StringPiece(pickler.data_as_char(), pickler.size()));
354 }
355
RemoveFieldTrial(base::StringPiece trial)356 void PersistentSystemProfile::RemoveFieldTrial(base::StringPiece trial) {
357 DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
358 DCHECK(!trial.empty());
359
360 base::Pickle pickler;
361 pickler.WriteString(trial);
362 pickler.WriteString(kFieldTrialDeletionSentinel);
363
364 WriteToAll(kFieldTrialInfo,
365 base::StringPiece(pickler.data_as_char(), pickler.size()));
366 }
367 // static
HasSystemProfile(const base::PersistentMemoryAllocator & memory_allocator)368 bool PersistentSystemProfile::HasSystemProfile(
369 const base::PersistentMemoryAllocator& memory_allocator) {
370 const RecordAllocator records(&memory_allocator);
371 return records.HasMoreData();
372 }
373
374 // static
GetSystemProfile(const base::PersistentMemoryAllocator & memory_allocator,SystemProfileProto * system_profile)375 bool PersistentSystemProfile::GetSystemProfile(
376 const base::PersistentMemoryAllocator& memory_allocator,
377 SystemProfileProto* system_profile) {
378 const RecordAllocator records(&memory_allocator);
379
380 RecordType type;
381 std::string record;
382 do {
383 if (!records.Read(&type, &record))
384 return false;
385 } while (type != kSystemProfileProto);
386
387 if (!system_profile)
388 return true;
389
390 if (!system_profile->ParseFromString(record))
391 return false;
392
393 MergeUpdateRecords(memory_allocator, system_profile);
394 return true;
395 }
396
// static
// Applies all "update" records (currently only field-trial changes) found in
// |memory_allocator| on top of |system_profile|, rewriting its field_trial
// list without duplicates and honoring deletion sentinels.
void PersistentSystemProfile::MergeUpdateRecords(
    const base::PersistentMemoryAllocator& memory_allocator,
    SystemProfileProto* system_profile) {
  const RecordAllocator records(&memory_allocator);

  RecordType type;
  std::string record;
  // Maps field-trial name-id -> group-id, mirroring the proto's list.
  std::map<uint32_t, uint32_t> field_trials;
  bool updated = false;

  // This is done separate from the code that gets the profile because it
  // compartmentalizes the code and makes it possible to reuse this section
  // should it be needed to merge "update" records into a new "complete"
  // system profile that somehow didn't get all the updates.
  while (records.Read(&type, &record)) {
    switch (type) {
      case kUnusedSpace:
        // These should never be returned.
        NOTREACHED();
        break;

      case kSystemProfileProto:
        // Profile was passed in; ignore this one.
        break;

      case kFieldTrialInfo: {
        // Get the set of known trial IDs so duplicates don't get added.
        // The map is built lazily on the first field-trial record seen.
        if (field_trials.empty()) {
          for (int i = 0; i < system_profile->field_trial_size(); ++i) {
            field_trials[system_profile->field_trial(i).name_id()] =
                system_profile->field_trial(i).group_id();
          }
        }

        // Each record is a pickled (trial, group) string pair written by
        // AddFieldTrial() / RemoveFieldTrial().
        base::Pickle pickler =
            base::Pickle::WithUnownedBuffer(base::as_byte_span(record));
        base::PickleIterator iter(pickler);
        base::StringPiece trial;
        base::StringPiece group;
        if (iter.ReadStringPiece(&trial) && iter.ReadStringPiece(&group)) {
          variations::ActiveGroupId field_ids =
              variations::MakeActiveGroupId(trial, group);
          // The empty-group sentinel deletes the trial; otherwise the most
          // recent group value wins.
          if (group == kFieldTrialDeletionSentinel) {
            field_trials.erase(field_ids.name);
          } else {
            field_trials[field_ids.name] = field_ids.group;
          }
        }
        updated = true;
      } break;
    }
  }

  // Skip rewriting the field trials if there was no update.
  if (!updated) {
    return;
  }

  // Rewrite the full list of field trials to avoid duplicates.
  system_profile->clear_field_trial();

  for (const auto& trial : field_trials) {
    SystemProfileProto::FieldTrial* field_trial =
        system_profile->add_field_trial();
    field_trial->set_name_id(trial.first);
    field_trial->set_group_id(trial.second);
  }
}
466
WriteToAll(RecordType type,base::StringPiece record)467 void PersistentSystemProfile::WriteToAll(RecordType type,
468 base::StringPiece record) {
469 for (auto& allocator : allocators_)
470 allocator.Write(type, record);
471 }
472
// Returns the lazily-created process-wide instance. LeakySingletonTraits
// means the instance is intentionally never destroyed at process exit.
GlobalPersistentSystemProfile* GlobalPersistentSystemProfile::GetInstance() {
  return base::Singleton<
      GlobalPersistentSystemProfile,
      base::LeakySingletonTraits<GlobalPersistentSystemProfile>>::get();
}
478
479 } // namespace metrics
480