1 // Copyright 2010 Google LLC
2 //
3 // Redistribution and use in source and binary forms, with or without
4 // modification, are permitted provided that the following conditions are
5 // met:
6 //
7 // * Redistributions of source code must retain the above copyright
8 // notice, this list of conditions and the following disclaimer.
9 // * Redistributions in binary form must reproduce the above
10 // copyright notice, this list of conditions and the following disclaimer
11 // in the documentation and/or other materials provided with the
12 // distribution.
13 // * Neither the name of Google LLC nor the names of its
14 // contributors may be used to endorse or promote products derived from
15 // this software without specific prior written permission.
16 //
17 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28
29 // This code writes out minidump files:
30 // http://msdn.microsoft.com/en-us/library/ms680378(VS.85,loband).aspx
31 //
32 // Minidumps are a Microsoft format which Breakpad uses for recording crash
33 // dumps. This code has to run in a compromised environment (the address space
34 // may have received SIGSEGV), thus the following rules apply:
35 //  * You may not enter the dynamic linker. This means that we cannot call
36 //    any symbols in a shared library (including libc). Because of this we
37 //    replace libc functions in linux_libc_support.h.
38 // * You may not call syscalls via the libc wrappers. This rule is a subset
39 // of the first rule but it bears repeating. We have direct wrappers
40 // around the system calls in linux_syscall_support.h.
41 // * You may not malloc. There's an alternative allocator in memory.h and
42 // a canonical instance in the LinuxDumper object. We use the placement
43 // new form to allocate objects and we don't delete them.
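//
// As a sketch of that last rule, allocations in this file follow the pattern
// used in WriteCPUInformation() below:
//
//   PageAllocator allocator;
//   ProcCpuInfoReader* reader = new(allocator) ProcCpuInfoReader(fd);
//   // ... use reader; it is never deleted. Its memory is reclaimed when the
//   // allocator itself goes away.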
44
45 #ifdef HAVE_CONFIG_H
46 #include <config.h> // Must come first
47 #endif
48
49 #include "client/linux/handler/minidump_descriptor.h"
50 #include "client/linux/minidump_writer/minidump_writer.h"
51 #include "client/minidump_file_writer-inl.h"
52
53 #include <ctype.h>
54 #include <errno.h>
55 #include <fcntl.h>
56 #include <link.h>
57 #include <stdio.h>
58 #if defined(__ANDROID__)
59 #include <sys/system_properties.h>
60 #endif
61 #include <sys/types.h>
62 #include <sys/ucontext.h>
63 #include <sys/user.h>
64 #include <sys/utsname.h>
65 #include <time.h>
66 #include <unistd.h>
67
68 #include <algorithm>
69
70 #include "client/linux/dump_writer_common/thread_info.h"
71 #include "client/linux/dump_writer_common/ucontext_reader.h"
72 #include "client/linux/handler/exception_handler.h"
73 #include "client/linux/minidump_writer/cpu_set.h"
74 #include "client/linux/minidump_writer/line_reader.h"
75 #include "client/linux/minidump_writer/linux_dumper.h"
76 #include "client/linux/minidump_writer/linux_ptrace_dumper.h"
77 #include "client/linux/minidump_writer/pe_file.h"
78 #include "client/linux/minidump_writer/pe_structs.h"
79 #include "client/linux/minidump_writer/proc_cpuinfo_reader.h"
80 #include "client/minidump_file_writer.h"
81 #include "common/linux/file_id.h"
82 #include "common/linux/linux_libc_support.h"
83 #include "common/minidump_type_helper.h"
84 #include "google_breakpad/common/minidump_format.h"
85 #include "third_party/lss/linux_syscall_support.h"
86
87 namespace {
88
89 using google_breakpad::AppMemoryList;
90 using google_breakpad::auto_wasteful_vector;
91 using google_breakpad::elf::kDefaultBuildIdSize;
92 using google_breakpad::ExceptionHandler;
93 using google_breakpad::CpuSet;
94 using google_breakpad::LineReader;
95 using google_breakpad::LinuxDumper;
96 using google_breakpad::LinuxPtraceDumper;
97 using google_breakpad::MDTypeHelper;
98 using google_breakpad::MappingEntry;
99 using google_breakpad::MappingInfo;
100 using google_breakpad::MappingList;
101 using google_breakpad::MinidumpFileWriter;
102 using google_breakpad::PageAllocator;
103 using google_breakpad::PEFile;
104 using google_breakpad::PEFileFormat;
105 using google_breakpad::ProcCpuInfoReader;
106 using google_breakpad::RawContextCPU;
107 using google_breakpad::RSDS_DEBUG_FORMAT;
108 using google_breakpad::ThreadInfo;
109 using google_breakpad::TypedMDRVA;
110 using google_breakpad::UContextReader;
111 using google_breakpad::UntypedMDRVA;
112 using google_breakpad::wasteful_vector;
113
114 typedef MDTypeHelper<sizeof(void*)>::MDRawDebug MDRawDebug;
115 typedef MDTypeHelper<sizeof(void*)>::MDRawLinkMap MDRawLinkMap;
116
117 class MinidumpWriter {
118 public:
119 // The following kLimit* constants are for when minidump_size_limit_ is set
120 // and the minidump size might exceed it.
121 //
122 // Estimate for how big each thread's stack will be (in bytes).
123 static const unsigned kLimitAverageThreadStackLength = 8 * 1024;
124 // Number of threads whose stack size we don't want to limit. These base
125 // threads will simply be the first N threads returned by the dumper (although
126 // the crashing thread will never be limited). Threads beyond this count are
127 // the extra threads.
128 static const unsigned kLimitBaseThreadCount = 20;
129 // Maximum stack size to dump for any extra thread (in bytes).
130 static const unsigned kLimitMaxExtraThreadStackLen = 2 * 1024;
131 // Make sure this number of additional bytes can fit in the minidump
132 // (excluding the stack data).
133 static const unsigned kLimitMinidumpFudgeFactor = 64 * 1024;
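// Taken together (see WriteThreadListStream below): if the current file
// position plus num_threads * kLimitAverageThreadStackLength plus
// kLimitMinidumpFudgeFactor would exceed minidump_size_limit_, then any
// thread beyond the first kLimitBaseThreadCount threads has its dumped
// stack capped at kLimitMaxExtraThreadStackLen bytes.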
134
135 MinidumpWriter(const char* minidump_path,
136 int minidump_fd,
137 const ExceptionHandler::CrashContext* context,
138 const MappingList& mappings,
139 const AppMemoryList& appmem,
140 bool skip_stacks_if_mapping_unreferenced,
141 uintptr_t principal_mapping_address,
142 bool sanitize_stacks,
143 LinuxDumper* dumper)
144 : fd_(minidump_fd),
145 path_(minidump_path),
146 ucontext_(context ? &context->context : NULL),
147 #if GOOGLE_BREAKPAD_CRASH_CONTEXT_HAS_FLOAT_STATE
148 float_state_(context ? &context->float_state : NULL),
149 #endif
150 dumper_(dumper),
151 minidump_size_limit_(-1),
152 memory_blocks_(dumper_->allocator()),
153 mapping_list_(mappings),
154 app_memory_list_(appmem),
155 skip_stacks_if_mapping_unreferenced_(
156 skip_stacks_if_mapping_unreferenced),
157 principal_mapping_address_(principal_mapping_address),
158 principal_mapping_(nullptr),
159 sanitize_stacks_(sanitize_stacks) {
160 // Assert there should be either a valid fd or a valid path, not both.
161 assert(fd_ != -1 || minidump_path);
162 assert(fd_ == -1 || !minidump_path);
163 }
164
165 bool Init() {
166 if (!dumper_->Init())
167 return false;
168
169 if (!dumper_->ThreadsSuspend() || !dumper_->LateInit())
170 return false;
171
172 if (skip_stacks_if_mapping_unreferenced_) {
173 principal_mapping_ =
174 dumper_->FindMappingNoBias(principal_mapping_address_);
175 if (!CrashingThreadReferencesPrincipalMapping())
176 return false;
177 }
178
179 if (fd_ != -1)
180 minidump_writer_.SetFile(fd_);
181 else if (!minidump_writer_.Open(path_))
182 return false;
183
184 return true;
185 }
186
187 ~MinidumpWriter() {
188 // Don't close the file descriptor when it's been provided explicitly.
189 // Callers might still need to use it.
190 if (fd_ == -1)
191 minidump_writer_.Close();
192 dumper_->ThreadsResume();
193 }
194
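// Returns true if the crashing thread's instruction pointer lies within the
// principal mapping, or if its stack contains a pointer into that mapping.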
195 bool CrashingThreadReferencesPrincipalMapping() {
196 if (!ucontext_ || !principal_mapping_)
197 return false;
198
199 const uintptr_t low_addr =
200 principal_mapping_->system_mapping_info.start_addr;
201 const uintptr_t high_addr =
202 principal_mapping_->system_mapping_info.end_addr;
203
204 const uintptr_t stack_pointer = UContextReader::GetStackPointer(ucontext_);
205 const uintptr_t pc = UContextReader::GetInstructionPointer(ucontext_);
206
207 if (pc >= low_addr && pc < high_addr)
208 return true;
209
210 uint8_t* stack_copy;
211 const void* stack;
212 size_t stack_len;
213
214 if (!dumper_->GetStackInfo(&stack, &stack_len, stack_pointer))
215 return false;
216
217 stack_copy = reinterpret_cast<uint8_t*>(Alloc(stack_len));
218 dumper_->CopyFromProcess(stack_copy, GetCrashThread(), stack, stack_len);
219
220 uintptr_t stack_pointer_offset =
221 stack_pointer - reinterpret_cast<uintptr_t>(stack);
222
223 return dumper_->StackHasPointerToMapping(
224 stack_copy, stack_len, stack_pointer_offset, *principal_mapping_);
225 }
226
227 bool Dump() {
228 // A minidump file contains a number of tagged streams. This is the number
229 // of streams which we write.
230 unsigned kNumWriters = 13;
231
232 TypedMDRVA<MDRawDirectory> dir(&minidump_writer_);
233 {
234 // Ensure the header gets flushed, as that happens in the destructor.
235 // If a crash occurs somewhere below, at least the header will be
236 // intact.
237 TypedMDRVA<MDRawHeader> header(&minidump_writer_);
238 if (!header.Allocate())
239 return false;
240
241 if (!dir.AllocateArray(kNumWriters))
242 return false;
243
244 my_memset(header.get(), 0, sizeof(MDRawHeader));
245
246 header.get()->signature = MD_HEADER_SIGNATURE;
247 header.get()->version = MD_HEADER_VERSION;
248 header.get()->time_date_stamp = time(NULL);
249 header.get()->stream_count = kNumWriters;
250 header.get()->stream_directory_rva = dir.position();
251 }
252
253 unsigned dir_index = 0;
254 MDRawDirectory dirent;
255
256 if (!WriteThreadListStream(&dirent))
257 return false;
258 dir.CopyIndex(dir_index++, &dirent);
259
260 if (!WriteMappings(&dirent))
261 return false;
262 dir.CopyIndex(dir_index++, &dirent);
263
264 if (!WriteAppMemory())
265 return false;
266
267 if (!WriteMemoryListStream(&dirent))
268 return false;
269 dir.CopyIndex(dir_index++, &dirent);
270
271 if (!WriteExceptionStream(&dirent))
272 return false;
273 dir.CopyIndex(dir_index++, &dirent);
274
275 if (!WriteSystemInfoStream(&dirent))
276 return false;
277 dir.CopyIndex(dir_index++, &dirent);
278
279 dirent.stream_type = MD_LINUX_CPU_INFO;
280 if (!WriteFile(&dirent.location, "/proc/cpuinfo"))
281 NullifyDirectoryEntry(&dirent);
282 dir.CopyIndex(dir_index++, &dirent);
283
284 dirent.stream_type = MD_LINUX_PROC_STATUS;
285 if (!WriteProcFile(&dirent.location, GetCrashThread(), "status"))
286 NullifyDirectoryEntry(&dirent);
287 dir.CopyIndex(dir_index++, &dirent);
288
289 dirent.stream_type = MD_LINUX_LSB_RELEASE;
290 if (!WriteFile(&dirent.location, "/etc/lsb-release"))
291 NullifyDirectoryEntry(&dirent);
292 dir.CopyIndex(dir_index++, &dirent);
293
294 dirent.stream_type = MD_LINUX_CMD_LINE;
295 if (!WriteProcFile(&dirent.location, GetCrashThread(), "cmdline"))
296 NullifyDirectoryEntry(&dirent);
297 dir.CopyIndex(dir_index++, &dirent);
298
299 dirent.stream_type = MD_LINUX_ENVIRON;
300 if (!WriteProcFile(&dirent.location, GetCrashThread(), "environ"))
301 NullifyDirectoryEntry(&dirent);
302 dir.CopyIndex(dir_index++, &dirent);
303
304 dirent.stream_type = MD_LINUX_AUXV;
305 if (!WriteProcFile(&dirent.location, GetCrashThread(), "auxv"))
306 NullifyDirectoryEntry(&dirent);
307 dir.CopyIndex(dir_index++, &dirent);
308
309 dirent.stream_type = MD_LINUX_MAPS;
310 if (!WriteProcFile(&dirent.location, GetCrashThread(), "maps"))
311 NullifyDirectoryEntry(&dirent);
312 dir.CopyIndex(dir_index++, &dirent);
313
314 dirent.stream_type = MD_LINUX_DSO_DEBUG;
315 if (!WriteDSODebugStream(&dirent))
316 NullifyDirectoryEntry(&dirent);
317 dir.CopyIndex(dir_index++, &dirent);
318
319 // If you add more directory entries, don't forget to update kNumWriters,
320 // above.
321
322 dumper_->ThreadsResume();
323 return true;
324 }
325
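// Copy one thread's stack into the minidump. The stack is truncated to
// max_stack_len bytes when that limit is non-negative, skipped entirely if
// stack filtering is enabled and it does not reference the principal
// mapping, and sanitized if requested. *stack_copy receives a pointer to the
// local copy of the stack data (or NULL if the stack could not be found).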
326 bool FillThreadStack(MDRawThread* thread, uintptr_t stack_pointer,
327 uintptr_t pc, int max_stack_len, uint8_t** stack_copy) {
328 *stack_copy = NULL;
329 const void* stack;
330 size_t stack_len;
331
332 thread->stack.start_of_memory_range = stack_pointer;
333 thread->stack.memory.data_size = 0;
334 thread->stack.memory.rva = minidump_writer_.position();
335
336 if (dumper_->GetStackInfo(&stack, &stack_len, stack_pointer)) {
337 if (max_stack_len >= 0 &&
338 stack_len > static_cast<unsigned int>(max_stack_len)) {
339 stack_len = max_stack_len;
340 // Skip empty chunks of length max_stack_len.
341 uintptr_t int_stack = reinterpret_cast<uintptr_t>(stack);
342 if (max_stack_len > 0) {
343 while (int_stack + max_stack_len < stack_pointer) {
344 int_stack += max_stack_len;
345 }
346 }
347 stack = reinterpret_cast<const void*>(int_stack);
348 }
349 *stack_copy = reinterpret_cast<uint8_t*>(Alloc(stack_len));
350 dumper_->CopyFromProcess(*stack_copy, thread->thread_id, stack,
351 stack_len);
352
353 uintptr_t stack_pointer_offset =
354 stack_pointer - reinterpret_cast<uintptr_t>(stack);
355 if (skip_stacks_if_mapping_unreferenced_) {
356 if (!principal_mapping_) {
357 return true;
358 }
359 uintptr_t low_addr = principal_mapping_->system_mapping_info.start_addr;
360 uintptr_t high_addr = principal_mapping_->system_mapping_info.end_addr;
361 if ((pc < low_addr || pc > high_addr) &&
362 !dumper_->StackHasPointerToMapping(*stack_copy, stack_len,
363 stack_pointer_offset,
364 *principal_mapping_)) {
365 return true;
366 }
367 }
368
369 if (sanitize_stacks_) {
370 dumper_->SanitizeStackCopy(*stack_copy, stack_len, stack_pointer,
371 stack_pointer_offset);
372 }
373
374 UntypedMDRVA memory(&minidump_writer_);
375 if (!memory.Allocate(stack_len))
376 return false;
377 memory.Copy(*stack_copy, stack_len);
378 thread->stack.start_of_memory_range = reinterpret_cast<uintptr_t>(stack);
379 thread->stack.memory = memory.location();
380 memory_blocks_.push_back(thread->stack);
381 }
382 return true;
383 }
384
385 // Write information about the threads.
386 bool WriteThreadListStream(MDRawDirectory* dirent) {
387 const unsigned num_threads = dumper_->threads().size();
388
389 TypedMDRVA<uint32_t> list(&minidump_writer_);
390 if (!list.AllocateObjectAndArray(num_threads, sizeof(MDRawThread)))
391 return false;
392
393 dirent->stream_type = MD_THREAD_LIST_STREAM;
394 dirent->location = list.location();
395
396 *list.get() = num_threads;
397
398 // If there's a minidump size limit, check if it might be exceeded. Since
399 // most of the space is filled with stack data, just check against that.
400 // If this expects to exceed the limit, set extra_thread_stack_len such
401 // that any thread beyond the first kLimitBaseThreadCount threads will
402 // have only kLimitMaxExtraThreadStackLen bytes dumped.
403 int extra_thread_stack_len = -1; // default to no maximum
404 if (minidump_size_limit_ >= 0) {
405 const unsigned estimated_total_stack_size = num_threads *
406 kLimitAverageThreadStackLength;
407 const off_t estimated_minidump_size = minidump_writer_.position() +
408 estimated_total_stack_size + kLimitMinidumpFudgeFactor;
409 if (estimated_minidump_size > minidump_size_limit_)
410 extra_thread_stack_len = kLimitMaxExtraThreadStackLen;
411 }
412
413 for (unsigned i = 0; i < num_threads; ++i) {
414 MDRawThread thread;
415 my_memset(&thread, 0, sizeof(thread));
416 thread.thread_id = dumper_->threads()[i];
417
418 // We have a different source of information for the crashing thread. If
419 // we used the actual state of the thread we would find it running in the
420 // signal handler with the alternative stack, which would be deeply
421 // unhelpful.
422 if (static_cast<pid_t>(thread.thread_id) == GetCrashThread() &&
423 ucontext_ &&
424 !dumper_->IsPostMortem()) {
425 uint8_t* stack_copy;
426 const uintptr_t stack_ptr = UContextReader::GetStackPointer(ucontext_);
427 if (!FillThreadStack(&thread, stack_ptr,
428 UContextReader::GetInstructionPointer(ucontext_),
429 -1, &stack_copy))
430 return false;
431
432 // Copy 256 bytes around crashing instruction pointer to minidump.
433 const size_t kIPMemorySize = 256;
434 uint64_t ip = UContextReader::GetInstructionPointer(ucontext_);
435 // Bound it to the upper and lower bounds of the memory map
436 // it's contained within. If it's not in mapped memory,
437 // don't bother trying to write it.
438 bool ip_is_mapped = false;
439 MDMemoryDescriptor ip_memory_d;
440 for (unsigned j = 0; j < dumper_->mappings().size(); ++j) {
441 const MappingInfo& mapping = *dumper_->mappings()[j];
442 if (ip >= mapping.start_addr &&
443 ip < mapping.start_addr + mapping.size) {
444 ip_is_mapped = true;
445 // Try to get 128 bytes before and after the IP, but
446 // settle for whatever's available.
447 ip_memory_d.start_of_memory_range =
448 std::max(mapping.start_addr,
449 uintptr_t(ip - (kIPMemorySize / 2)));
450 uintptr_t end_of_range =
451 std::min(uintptr_t(ip + (kIPMemorySize / 2)),
452 uintptr_t(mapping.start_addr + mapping.size));
453 ip_memory_d.memory.data_size =
454 end_of_range - ip_memory_d.start_of_memory_range;
455 break;
456 }
457 }
458
459 if (ip_is_mapped) {
460 UntypedMDRVA ip_memory(&minidump_writer_);
461 if (!ip_memory.Allocate(ip_memory_d.memory.data_size))
462 return false;
463 uint8_t* memory_copy =
464 reinterpret_cast<uint8_t*>(Alloc(ip_memory_d.memory.data_size));
465 dumper_->CopyFromProcess(
466 memory_copy,
467 thread.thread_id,
468 reinterpret_cast<void*>(ip_memory_d.start_of_memory_range),
469 ip_memory_d.memory.data_size);
470 ip_memory.Copy(memory_copy, ip_memory_d.memory.data_size);
471 ip_memory_d.memory = ip_memory.location();
472 memory_blocks_.push_back(ip_memory_d);
473 }
474
475 TypedMDRVA<RawContextCPU> cpu(&minidump_writer_);
476 if (!cpu.Allocate())
477 return false;
478 my_memset(cpu.get(), 0, sizeof(RawContextCPU));
479 #if GOOGLE_BREAKPAD_CRASH_CONTEXT_HAS_FLOAT_STATE
480 UContextReader::FillCPUContext(cpu.get(), ucontext_, float_state_);
481 #else
482 UContextReader::FillCPUContext(cpu.get(), ucontext_);
483 #endif
484 thread.thread_context = cpu.location();
485 crashing_thread_context_ = cpu.location();
486 } else {
487 ThreadInfo info;
488 if (!dumper_->GetThreadInfoByIndex(i, &info))
489 return false;
490
491 uint8_t* stack_copy;
492 int max_stack_len = -1; // default to no maximum for this thread
493 if (minidump_size_limit_ >= 0 && i >= kLimitBaseThreadCount)
494 max_stack_len = extra_thread_stack_len;
495 if (!FillThreadStack(&thread, info.stack_pointer,
496 info.GetInstructionPointer(), max_stack_len,
497 &stack_copy))
498 return false;
499
500 TypedMDRVA<RawContextCPU> cpu(&minidump_writer_);
501 if (!cpu.Allocate())
502 return false;
503 my_memset(cpu.get(), 0, sizeof(RawContextCPU));
504 info.FillCPUContext(cpu.get());
505 thread.thread_context = cpu.location();
506 if (dumper_->threads()[i] == GetCrashThread()) {
507 crashing_thread_context_ = cpu.location();
508 if (!dumper_->IsPostMortem()) {
509 // This is the crashing thread of a live process, but
510 // no context was provided, so set the crash address
511 // while the instruction pointer is already here.
512 dumper_->set_crash_address(info.GetInstructionPointer());
513 }
514 }
515 }
516
517 list.CopyIndexAfterObject(i, &thread, sizeof(thread));
518 }
519
520 return true;
521 }
522
523 // Write application-provided memory regions.
524 bool WriteAppMemory() {
525 for (AppMemoryList::const_iterator iter = app_memory_list_.begin();
526 iter != app_memory_list_.end();
527 ++iter) {
528 uint8_t* data_copy =
529 reinterpret_cast<uint8_t*>(dumper_->allocator()->Alloc(iter->length));
530 dumper_->CopyFromProcess(data_copy, GetCrashThread(), iter->ptr,
531 iter->length);
532
533 UntypedMDRVA memory(&minidump_writer_);
534 if (!memory.Allocate(iter->length)) {
535 return false;
536 }
537 memory.Copy(data_copy, iter->length);
538 MDMemoryDescriptor desc;
539 desc.start_of_memory_range = reinterpret_cast<uintptr_t>(iter->ptr);
540 desc.memory = memory.location();
541 memory_blocks_.push_back(desc);
542 }
543
544 return true;
545 }
546
547 static bool ShouldIncludeMapping(const MappingInfo& mapping) {
548 if (mapping.name[0] == 0 || // only want modules with filenames.
549 // Only want to include one mapping per shared lib.
550 // Avoid filtering executable mappings.
551 (mapping.offset != 0 && !mapping.exec) ||
552 mapping.size < 4096) { // too small to get a signature for.
553 return false;
554 }
555
556 return true;
557 }
558
559 // If there is caller-provided information about this mapping
560 // in the mapping_list_ list, return true. Otherwise, return false.
561 bool HaveMappingInfo(const MappingInfo& mapping) {
562 for (MappingList::const_iterator iter = mapping_list_.begin();
563 iter != mapping_list_.end();
564 ++iter) {
565 // Ignore any mappings that are wholly contained within
566 // mappings in the mapping_info_ list.
567 if (mapping.start_addr >= iter->first.start_addr &&
568 (mapping.start_addr + mapping.size) <=
569 (iter->first.start_addr + iter->first.size)) {
570 return true;
571 }
572 }
573 return false;
574 }
575
576 // Write information about the mappings in effect. Because we are using the
577 // minidump format, the information about the mappings is pretty limited.
578 // Because of this, we also include the full, unparsed, /proc/$x/maps file in
579 // another stream in the file.
580 bool WriteMappings(MDRawDirectory* dirent) {
581 const unsigned num_mappings = dumper_->mappings().size();
582 unsigned num_output_mappings = mapping_list_.size();
583
584 for (unsigned i = 0; i < dumper_->mappings().size(); ++i) {
585 const MappingInfo& mapping = *dumper_->mappings()[i];
586 if (ShouldIncludeMapping(mapping) && !HaveMappingInfo(mapping))
587 num_output_mappings++;
588 }
589
590 TypedMDRVA<uint32_t> list(&minidump_writer_);
591 if (num_output_mappings) {
592 if (!list.AllocateObjectAndArray(num_output_mappings, MD_MODULE_SIZE))
593 return false;
594 } else {
595 // Still create the module list stream, although it will have zero
596 // modules.
597 if (!list.Allocate())
598 return false;
599 }
600
601 dirent->stream_type = MD_MODULE_LIST_STREAM;
602 dirent->location = list.location();
603 *list.get() = num_output_mappings;
604
605 // First write all the mappings from the dumper
606 unsigned int j = 0;
607 for (unsigned i = 0; i < num_mappings; ++i) {
608 const MappingInfo& mapping = *dumper_->mappings()[i];
609 if (!ShouldIncludeMapping(mapping) || HaveMappingInfo(mapping))
610 continue;
611
612 MDRawModule mod;
613 if (!FillRawModule(mapping, true, i, &mod, NULL))
614 return false;
615 list.CopyIndexAfterObject(j++, &mod, MD_MODULE_SIZE);
616 }
617 // Next write all the mappings provided by the caller
618 for (MappingList::const_iterator iter = mapping_list_.begin();
619 iter != mapping_list_.end();
620 ++iter) {
621 MDRawModule mod;
622 if (!FillRawModule(iter->first, false, 0, &mod, iter->second))
623 return false;
624 list.CopyIndexAfterObject(j++, &mod, MD_MODULE_SIZE);
625 }
626
627 return true;
628 }
629
630 // Fill the MDRawModule |mod| with information about the provided
631 // |mapping|. If |identifier| is non-NULL, use it instead of calculating
632 // a file ID from the mapping.
633 bool FillRawModule(const MappingInfo& mapping,
634 bool member,
635 unsigned int mapping_id,
636 MDRawModule* mod,
637 const uint8_t* identifier) {
638 my_memset(mod, 0, MD_MODULE_SIZE);
639
640 mod->base_of_image = mapping.start_addr;
641 mod->size_of_image = mapping.size;
642
643 char file_name[NAME_MAX];
644 char file_path[NAME_MAX];
645
646 dumper_->GetMappingEffectiveNameAndPath(mapping, file_path,
647 sizeof(file_path), file_name,
648 sizeof(file_name));
649
650 RSDS_DEBUG_FORMAT rsds;
651 PEFileFormat file_format = PEFile::TryGetDebugInfo(file_path, &rsds);
652
653 if (file_format == PEFileFormat::notPeCoff) {
654 // The module is not a PE/COFF file, process as an ELF.
655 auto_wasteful_vector<uint8_t, kDefaultBuildIdSize> identifier_bytes(
656 dumper_->allocator());
657
658 if (identifier) {
659 // GUID was provided by caller.
660 identifier_bytes.insert(identifier_bytes.end(), identifier,
661 identifier + sizeof(MDGUID));
662 } else {
663 // Note: ElfFileIdentifierForMapping() can modify |mapping.name|,
664 // which is why we need to call GetMappingEffectiveNameAndPath()
665 // again.
666 dumper_->ElfFileIdentifierForMapping(mapping, member, mapping_id,
667 identifier_bytes);
668 dumper_->GetMappingEffectiveNameAndPath(mapping, file_path,
669 sizeof(file_path), file_name,
670 sizeof(file_name));
671 }
672
673 if (!identifier_bytes.empty()) {
674 UntypedMDRVA cv(&minidump_writer_);
675 if (!cv.Allocate(MDCVInfoELF_minsize + identifier_bytes.size()))
676 return false;
677
678 const uint32_t cv_signature = MD_CVINFOELF_SIGNATURE;
679 cv.Copy(&cv_signature, sizeof(cv_signature));
680 cv.Copy(cv.position() + sizeof(cv_signature), &identifier_bytes[0],
681 identifier_bytes.size());
682
683 mod->cv_record = cv.location();
684 }
685 } else {
686 // The module is a PE/COFF file. Create MDCVInfoPDB70 struct for it.
687 size_t file_name_length = strlen(file_name);
688 TypedMDRVA<MDCVInfoPDB70> cv(&minidump_writer_);
689 if (!cv.AllocateObjectAndArray(file_name_length + 1, sizeof(uint8_t)))
690 return false;
691 if (!cv.CopyIndexAfterObject(0, file_name, file_name_length))
692 return false;
693 MDCVInfoPDB70* cv_ptr = cv.get();
694 cv_ptr->cv_signature = MD_CVINFOPDB70_SIGNATURE;
695 if (file_format == PEFileFormat::peWithBuildId) {
696 // Populate BuildId and age using RSDS instance.
697 cv_ptr->signature.data1 = static_cast<uint32_t>(rsds.guid[0]) << 24 |
698 static_cast<uint32_t>(rsds.guid[1]) << 16 |
699 static_cast<uint32_t>(rsds.guid[2]) << 8 |
700 static_cast<uint32_t>(rsds.guid[3]);
701 cv_ptr->signature.data2 =
702 static_cast<uint16_t>(rsds.guid[4]) << 8 | rsds.guid[5];
703 cv_ptr->signature.data3 =
704 static_cast<uint16_t>(rsds.guid[6]) << 8 | rsds.guid[7];
705 cv_ptr->signature.data4[0] = rsds.guid[8];
706 cv_ptr->signature.data4[1] = rsds.guid[9];
707 cv_ptr->signature.data4[2] = rsds.guid[10];
708 cv_ptr->signature.data4[3] = rsds.guid[11];
709 cv_ptr->signature.data4[4] = rsds.guid[12];
710 cv_ptr->signature.data4[5] = rsds.guid[13];
711 cv_ptr->signature.data4[6] = rsds.guid[14];
712 cv_ptr->signature.data4[7] = rsds.guid[15];
713 // The age field is reversed in the same way.
714 cv_ptr->age = static_cast<uint32_t>(rsds.age[0]) << 24 |
715 static_cast<uint32_t>(rsds.age[1]) << 16 |
716 static_cast<uint32_t>(rsds.age[2]) << 8 |
717 static_cast<uint32_t>(rsds.age[3]);
718 } else {
719 cv_ptr->age = 0;
720 }
721
722 mod->cv_record = cv.location();
723 }
724
725 MDLocationDescriptor ld;
726 if (!minidump_writer_.WriteString(file_path, my_strlen(file_path), &ld))
727 return false;
728 mod->module_name_rva = ld.rva;
729 return true;
730 }
731
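// Write the MD_MEMORY_LIST_STREAM from the memory blocks collected earlier
// (thread stacks, instruction-pointer regions and app-supplied memory).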
732 bool WriteMemoryListStream(MDRawDirectory* dirent) {
733 TypedMDRVA<uint32_t> list(&minidump_writer_);
734 if (memory_blocks_.size()) {
735 if (!list.AllocateObjectAndArray(memory_blocks_.size(),
736 sizeof(MDMemoryDescriptor)))
737 return false;
738 } else {
739 // Still create the memory list stream, although it will have zero
740 // memory blocks.
741 if (!list.Allocate())
742 return false;
743 }
744
745 dirent->stream_type = MD_MEMORY_LIST_STREAM;
746 dirent->location = list.location();
747
748 *list.get() = memory_blocks_.size();
749
750 for (size_t i = 0; i < memory_blocks_.size(); ++i) {
751 list.CopyIndexAfterObject(i, &memory_blocks_[i],
752 sizeof(MDMemoryDescriptor));
753 }
754 return true;
755 }
756
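// Write the MD_EXCEPTION_STREAM describing the crash: signal, signal code,
// fault address, any extra exception information, and a reference to the
// crashing thread's CPU context.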
757 bool WriteExceptionStream(MDRawDirectory* dirent) {
758 TypedMDRVA<MDRawExceptionStream> exc(&minidump_writer_);
759 if (!exc.Allocate())
760 return false;
761
762 MDRawExceptionStream* stream = exc.get();
763 my_memset(stream, 0, sizeof(MDRawExceptionStream));
764
765 dirent->stream_type = MD_EXCEPTION_STREAM;
766 dirent->location = exc.location();
767
768 stream->thread_id = GetCrashThread();
769 stream->exception_record.exception_code = dumper_->crash_signal();
770 stream->exception_record.exception_flags = dumper_->crash_signal_code();
771 stream->exception_record.exception_address = dumper_->crash_address();
772 const std::vector<uint64_t> crash_exception_info =
773 dumper_->crash_exception_info();
774 stream->exception_record.number_parameters = crash_exception_info.size();
775 memcpy(stream->exception_record.exception_information,
776 crash_exception_info.data(),
777 sizeof(uint64_t) * crash_exception_info.size());
778 stream->thread_context = crashing_thread_context_;
779
780 return true;
781 }
782
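// Write the MD_SYSTEM_INFO_STREAM, filled in with CPU and OS information.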
783 bool WriteSystemInfoStream(MDRawDirectory* dirent) {
784 TypedMDRVA<MDRawSystemInfo> si(&minidump_writer_);
785 if (!si.Allocate())
786 return false;
787 my_memset(si.get(), 0, sizeof(MDRawSystemInfo));
788
789 dirent->stream_type = MD_SYSTEM_INFO_STREAM;
790 dirent->location = si.location();
791
792 WriteCPUInformation(si.get());
793 WriteOSInformation(si.get());
794
795 return true;
796 }
797
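// Write the MD_LINUX_DSO_DEBUG stream by locating the target's r_debug
// structure via its program headers and walking the link_map list of loaded
// DSOs. All target memory is read with CopyFromProcess().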
798 bool WriteDSODebugStream(MDRawDirectory* dirent) {
799 ElfW(Phdr)* phdr = reinterpret_cast<ElfW(Phdr)*>(dumper_->auxv()[AT_PHDR]);
800 char* base;
801 int phnum = dumper_->auxv()[AT_PHNUM];
802 if (!phnum || !phdr)
803 return false;
804
805 // Assume the program base is at the beginning of the same page as the PHDR
806 base = reinterpret_cast<char*>(reinterpret_cast<uintptr_t>(phdr) & ~0xfff);
807
808 // Search for the program PT_DYNAMIC segment
809 ElfW(Addr) dyn_addr = 0;
810 for (; phnum >= 0; phnum--, phdr++) {
811 ElfW(Phdr) ph;
812 if (!dumper_->CopyFromProcess(&ph, GetCrashThread(), phdr, sizeof(ph)))
813 return false;
814
815 // Adjust base address with the virtual address of the PT_LOAD segment
816 // corresponding to offset 0
817 if (ph.p_type == PT_LOAD && ph.p_offset == 0) {
818 base -= ph.p_vaddr;
819 }
820 if (ph.p_type == PT_DYNAMIC) {
821 dyn_addr = ph.p_vaddr;
822 }
823 }
824 if (!dyn_addr)
825 return false;
826
827 ElfW(Dyn)* dynamic = reinterpret_cast<ElfW(Dyn)*>(dyn_addr + base);
828
829 // The dynamic linker makes information available that helps gdb find all
830 // DSOs loaded into the program. If this information is indeed available,
831 // dump it to a MD_LINUX_DSO_DEBUG stream.
832 struct r_debug* r_debug = NULL;
833 uint32_t dynamic_length = 0;
834
835 for (int i = 0; ; ++i) {
836 ElfW(Dyn) dyn;
837 dynamic_length += sizeof(dyn);
838 if (!dumper_->CopyFromProcess(&dyn, GetCrashThread(), dynamic + i,
839 sizeof(dyn))) {
840 return false;
841 }
842
843 #ifdef __mips__
844 const int32_t debug_tag = DT_MIPS_RLD_MAP;
845 #else
846 const int32_t debug_tag = DT_DEBUG;
847 #endif
848 if (dyn.d_tag == debug_tag) {
849 r_debug = reinterpret_cast<struct r_debug*>(dyn.d_un.d_ptr);
850 continue;
851 } else if (dyn.d_tag == DT_NULL) {
852 break;
853 }
854 }
855
856 // The "r_map" field of that r_debug struct contains a linked list of all
857 // loaded DSOs.
858 // Our list of DSOs potentially is different from the ones in the crashing
859 // process. So, we have to be careful to never dereference pointers
860 // directly. Instead, we use CopyFromProcess() everywhere.
861 // See <link.h> for a more detailed discussion of how the dynamic
862 // loader communicates with debuggers.
863
864 // Count the number of loaded DSOs
865 int dso_count = 0;
866 struct r_debug debug_entry;
867 if (!dumper_->CopyFromProcess(&debug_entry, GetCrashThread(), r_debug,
868 sizeof(debug_entry))) {
869 return false;
870 }
871 for (struct link_map* ptr = debug_entry.r_map; ptr; ) {
872 struct link_map map;
873 if (!dumper_->CopyFromProcess(&map, GetCrashThread(), ptr, sizeof(map)))
874 return false;
875
876 ptr = map.l_next;
877 dso_count++;
878 }
879
880 MDRVA linkmap_rva = minidump_writer_.kInvalidMDRVA;
881 if (dso_count > 0) {
882 // If we have at least one DSO, create an array of MDRawLinkMap
883 // entries in the minidump file.
884 TypedMDRVA<MDRawLinkMap> linkmap(&minidump_writer_);
885 if (!linkmap.AllocateArray(dso_count))
886 return false;
887 linkmap_rva = linkmap.location().rva;
888 int idx = 0;
889
890 // Iterate over the DSOs and write their information to the minidump
891 for (struct link_map* ptr = debug_entry.r_map; ptr; ) {
892 struct link_map map;
893 if (!dumper_->CopyFromProcess(&map, GetCrashThread(), ptr, sizeof(map)))
894 return false;
895
896 ptr = map.l_next;
897 char filename[257] = { 0 };
898 if (map.l_name) {
899 dumper_->CopyFromProcess(filename, GetCrashThread(), map.l_name,
900 sizeof(filename) - 1);
901 }
902 MDLocationDescriptor location;
903 if (!minidump_writer_.WriteString(filename, 0, &location))
904 return false;
905 MDRawLinkMap entry;
906 entry.name = location.rva;
907 entry.addr = map.l_addr;
908 entry.ld = reinterpret_cast<uintptr_t>(map.l_ld);
909 linkmap.CopyIndex(idx++, &entry);
910 }
911 }
912
913 // Write MD_LINUX_DSO_DEBUG record
914 TypedMDRVA<MDRawDebug> debug(&minidump_writer_);
915 if (!debug.AllocateObjectAndArray(1, dynamic_length))
916 return false;
917 my_memset(debug.get(), 0, sizeof(MDRawDebug));
918 dirent->stream_type = MD_LINUX_DSO_DEBUG;
919 dirent->location = debug.location();
920
921 debug.get()->version = debug_entry.r_version;
922 debug.get()->map = linkmap_rva;
923 debug.get()->dso_count = dso_count;
924 debug.get()->brk = debug_entry.r_brk;
925 debug.get()->ldbase = debug_entry.r_ldbase;
926 debug.get()->dynamic = reinterpret_cast<uintptr_t>(dynamic);
927
928 wasteful_vector<char> dso_debug_data(dumper_->allocator(), dynamic_length);
929 // The passed-in size to the constructor (above) is only a hint.
930 // Must call .resize() to do actual initialization of the elements.
931 dso_debug_data.resize(dynamic_length);
932 dumper_->CopyFromProcess(&dso_debug_data[0], GetCrashThread(), dynamic,
933 dynamic_length);
934 debug.CopyIndexAfterObject(0, &dso_debug_data[0], dynamic_length);
935
936 return true;
937 }
938
939 void set_minidump_size_limit(off_t limit) { minidump_size_limit_ = limit; }
940
941 private:
942 void* Alloc(unsigned bytes) {
943 return dumper_->allocator()->Alloc(bytes);
944 }
945
946 pid_t GetCrashThread() const {
947 return dumper_->crash_thread();
948 }
949
950 void NullifyDirectoryEntry(MDRawDirectory* dirent) {
951 dirent->stream_type = 0;
952 dirent->location.data_size = 0;
953 dirent->location.rva = 0;
954 }
955
956 #if defined(__i386__) || defined(__x86_64__) || defined(__mips__)
957 bool WriteCPUInformation(MDRawSystemInfo* sys_info) {
958 char vendor_id[sizeof(sys_info->cpu.x86_cpu_info.vendor_id) + 1] = {0};
959 static const char vendor_id_name[] = "vendor_id";
960
961 struct CpuInfoEntry {
962 const char* info_name;
963 int value;
964 bool found;
965 } cpu_info_table[] = {
966 { "processor", -1, false },
967 #if defined(__i386__) || defined(__x86_64__)
968 { "model", 0, false },
969 { "stepping", 0, false },
970 { "cpu family", 0, false },
971 #endif
972 };
973
974 // processor_architecture should always be set, do this first
975 sys_info->processor_architecture =
976 #if defined(__mips__)
977 # if _MIPS_SIM == _ABIO32
978 MD_CPU_ARCHITECTURE_MIPS;
979 # elif _MIPS_SIM == _ABI64
980 MD_CPU_ARCHITECTURE_MIPS64;
981 # else
982 # error "This mips ABI is currently not supported (n32)"
983 #endif
984 #elif defined(__i386__)
985 MD_CPU_ARCHITECTURE_X86;
986 #else
987 MD_CPU_ARCHITECTURE_AMD64;
988 #endif
989
990 const int fd = sys_open("/proc/cpuinfo", O_RDONLY, 0);
991 if (fd < 0)
992 return false;
993
994 {
995 PageAllocator allocator;
996 ProcCpuInfoReader* const reader = new(allocator) ProcCpuInfoReader(fd);
997 const char* field;
998 while (reader->GetNextField(&field)) {
999 bool is_first_entry = true;
1000 for (CpuInfoEntry& entry : cpu_info_table) {
1001 if (!is_first_entry && entry.found) {
1002 // except for the 'processor' field, ignore repeated values.
1003 continue;
1004 }
1005 is_first_entry = false;
1006 if (!my_strcmp(field, entry.info_name)) {
1007 size_t value_len;
1008 const char* value = reader->GetValueAndLen(&value_len);
1009 if (value_len == 0)
1010 continue;
1011
1012 uintptr_t val;
1013 if (my_read_decimal_ptr(&val, value) == value)
1014 continue;
1015
1016 entry.value = static_cast<int>(val);
1017 entry.found = true;
1018 }
1019 }
1020
1021 // special case for vendor_id
1022 if (!my_strcmp(field, vendor_id_name)) {
1023 size_t value_len;
1024 const char* value = reader->GetValueAndLen(&value_len);
1025 if (value_len > 0)
1026 my_strlcpy(vendor_id, value, sizeof(vendor_id));
1027 }
1028 }
1029 sys_close(fd);
1030 }
1031
1032 // make sure we got everything we wanted
1033 for (const CpuInfoEntry& entry : cpu_info_table) {
1034 if (!entry.found) {
1035 return false;
1036 }
1037 }
1038 // cpu_info_table[0] holds the last cpu id listed in /proc/cpuinfo;
1039 // assuming this is the highest id, convert it to the number of CPUs
1040 // by adding one.
1041 cpu_info_table[0].value++;
1042
1043 sys_info->number_of_processors = cpu_info_table[0].value;
1044 #if defined(__i386__) || defined(__x86_64__)
1045 sys_info->processor_level = cpu_info_table[3].value;
1046 sys_info->processor_revision = cpu_info_table[1].value << 8 |
1047 cpu_info_table[2].value;
1048 #endif
1049
1050 if (vendor_id[0] != '\0') {
1051 my_memcpy(sys_info->cpu.x86_cpu_info.vendor_id, vendor_id,
1052 sizeof(sys_info->cpu.x86_cpu_info.vendor_id));
1053 }
1054 return true;
1055 }
1056 #elif defined(__arm__) || defined(__aarch64__)
1057 bool WriteCPUInformation(MDRawSystemInfo* sys_info) {
1058 // The CPUID value is broken up in several entries in /proc/cpuinfo.
1059 // This table is used to rebuild it from the entries.
1060 const struct CpuIdEntry {
1061 const char* field;
1062 char format;
1063 char bit_lshift;
1064 char bit_length;
1065 } cpu_id_entries[] = {
1066 { "CPU implementer", 'x', 24, 8 },
1067 { "CPU variant", 'x', 20, 4 },
1068 { "CPU part", 'x', 4, 12 },
1069 { "CPU revision", 'd', 0, 4 },
1070 };
1071
1072 // The ELF hwcaps are listed in the "Features" entry as textual tags.
1073 // This table is used to rebuild them.
1074 const struct CpuFeaturesEntry {
1075 const char* tag;
1076 uint32_t hwcaps;
1077 } cpu_features_entries[] = {
1078 #if defined(__arm__)
1079 { "swp", MD_CPU_ARM_ELF_HWCAP_SWP },
1080 { "half", MD_CPU_ARM_ELF_HWCAP_HALF },
1081 { "thumb", MD_CPU_ARM_ELF_HWCAP_THUMB },
1082 { "26bit", MD_CPU_ARM_ELF_HWCAP_26BIT },
1083 { "fastmult", MD_CPU_ARM_ELF_HWCAP_FAST_MULT },
1084 { "fpa", MD_CPU_ARM_ELF_HWCAP_FPA },
1085 { "vfp", MD_CPU_ARM_ELF_HWCAP_VFP },
1086 { "edsp", MD_CPU_ARM_ELF_HWCAP_EDSP },
1087 { "java", MD_CPU_ARM_ELF_HWCAP_JAVA },
1088 { "iwmmxt", MD_CPU_ARM_ELF_HWCAP_IWMMXT },
1089 { "crunch", MD_CPU_ARM_ELF_HWCAP_CRUNCH },
1090 { "thumbee", MD_CPU_ARM_ELF_HWCAP_THUMBEE },
1091 { "neon", MD_CPU_ARM_ELF_HWCAP_NEON },
1092 { "vfpv3", MD_CPU_ARM_ELF_HWCAP_VFPv3 },
1093 { "vfpv3d16", MD_CPU_ARM_ELF_HWCAP_VFPv3D16 },
1094 { "tls", MD_CPU_ARM_ELF_HWCAP_TLS },
1095 { "vfpv4", MD_CPU_ARM_ELF_HWCAP_VFPv4 },
1096 { "idiva", MD_CPU_ARM_ELF_HWCAP_IDIVA },
1097 { "idivt", MD_CPU_ARM_ELF_HWCAP_IDIVT },
1098 { "idiv", MD_CPU_ARM_ELF_HWCAP_IDIVA | MD_CPU_ARM_ELF_HWCAP_IDIVT },
1099 #elif defined(__aarch64__)
1100 // No hwcaps on aarch64.
1101 #endif
1102 };
1103
1104 // processor_architecture should always be set, do this first
1105 sys_info->processor_architecture =
1106 #if defined(__aarch64__)
1107 MD_CPU_ARCHITECTURE_ARM64_OLD;
1108 #else
1109 MD_CPU_ARCHITECTURE_ARM;
1110 #endif
1111
1112 // /proc/cpuinfo is not readable under various sandboxed environments
1113 // (e.g. Android services with the android:isolatedProcess attribute),
1114 // so prepare for this by setting default values now, which will be
1115 // returned when this happens.
1116 //
1117 // Note: Bogus values are used to distinguish between failures (to
1118 // read /sys and /proc files) and really badly configured kernels.
1119 sys_info->number_of_processors = 0;
1120 sys_info->processor_level = 1U; // There is no ARMv1
1121 sys_info->processor_revision = 42;
1122 sys_info->cpu.arm_cpu_info.cpuid = 0;
1123 sys_info->cpu.arm_cpu_info.elf_hwcaps = 0;
1124
1125 // Counting the number of CPUs involves parsing two sysfs files,
1126 // because the content of /proc/cpuinfo will only mirror the number
1127 // of 'online' cores, and thus will vary with time.
1128 // See http://www.kernel.org/doc/Documentation/cputopology.txt
1129 {
1130 CpuSet cpus_present;
1131 CpuSet cpus_possible;
1132
1133 int fd = sys_open("/sys/devices/system/cpu/present", O_RDONLY, 0);
1134 if (fd >= 0) {
1135 cpus_present.ParseSysFile(fd);
1136 sys_close(fd);
1137
1138 fd = sys_open("/sys/devices/system/cpu/possible", O_RDONLY, 0);
1139 if (fd >= 0) {
1140 cpus_possible.ParseSysFile(fd);
1141 sys_close(fd);
1142
1143 cpus_present.IntersectWith(cpus_possible);
1144 int cpu_count = std::min(255, cpus_present.GetCount());
1145 sys_info->number_of_processors = static_cast<uint8_t>(cpu_count);
1146 }
1147 }
1148 }
1149
1150 // Parse /proc/cpuinfo to reconstruct the CPUID value, as well
1151 // as the ELF hwcaps field. For the latter, it would be easier to
1152 // read /proc/self/auxv but unfortunately, this file is not always
1153 // readable from regular Android applications on later versions
1154 // (>= 4.1) of the Android platform.
1155 const int fd = sys_open("/proc/cpuinfo", O_RDONLY, 0);
1156 if (fd < 0) {
1157 // Do not return false here to allow the minidump generation
1158 // to happen properly.
1159 return true;
1160 }
1161
1162 {
1163 PageAllocator allocator;
1164 ProcCpuInfoReader* const reader =
1165 new(allocator) ProcCpuInfoReader(fd);
1166 const char* field;
1167 while (reader->GetNextField(&field)) {
1168 for (const CpuIdEntry& entry : cpu_id_entries) {
1169 if (my_strcmp(entry.field, field) != 0)
1170 continue;
1171 uintptr_t result = 0;
1172 const char* value = reader->GetValue();
1173 const char* p = value;
1174 if (value[0] == '0' && value[1] == 'x') {
1175 p = my_read_hex_ptr(&result, value+2);
1176 } else if (entry.format == 'x') {
1177 p = my_read_hex_ptr(&result, value);
1178 } else {
1179 p = my_read_decimal_ptr(&result, value);
1180 }
1181 if (p == value)
1182 continue;
1183
1184 result &= (1U << entry.bit_length)-1;
1185 result <<= entry.bit_lshift;
1186 sys_info->cpu.arm_cpu_info.cpuid |=
1187 static_cast<uint32_t>(result);
1188 }
1189 #if defined(__arm__)
1190 // Get the architecture version from the "Processor" field.
1191 // Note that it is also available in the "CPU architecture" field,
1192 // however, some existing kernels are misconfigured and will report
1193 // invalid values here (e.g. 6, while the CPU is ARMv7-A based).
1194 // The "Processor" field doesn't have this issue.
1195 if (!my_strcmp(field, "Processor")) {
1196 size_t value_len;
1197 const char* value = reader->GetValueAndLen(&value_len);
1198 // Expected format: <text> (v<level><endian>)
1199 // Where <text> is some text like "ARMv7 Processor rev 2"
1200 // and <level> is a decimal corresponding to the ARM
1201 // architecture number. <endian> is either 'l' or 'b'
1202 // and corresponds to the endianness; it is ignored here.
1203 while (value_len > 0 && my_isspace(value[value_len-1]))
1204 value_len--;
1205
1206 size_t nn = value_len;
1207 while (nn > 0 && value[nn-1] != '(')
1208 nn--;
1209 if (nn > 0 && value[nn] == 'v') {
1210 uintptr_t arch_level = 5;
1211 my_read_decimal_ptr(&arch_level, value + nn + 1);
1212 sys_info->processor_level = static_cast<uint16_t>(arch_level);
1213 }
1214 }
1215 #elif defined(__aarch64__)
1216 // The aarch64 architecture does not provide the architecture level
1217 // in the Processor field, so we instead check the "CPU architecture"
1218 // field.
1219 if (!my_strcmp(field, "CPU architecture")) {
1220 uintptr_t arch_level = 0;
1221 const char* value = reader->GetValue();
1222 const char* p = value;
1223 p = my_read_decimal_ptr(&arch_level, value);
1224 if (p == value)
1225 continue;
1226 sys_info->processor_level = static_cast<uint16_t>(arch_level);
1227 }
1228 #endif
1229 // Rebuild the ELF hwcaps from the 'Features' field.
1230 if (!my_strcmp(field, "Features")) {
1231 size_t value_len;
1232 const char* value = reader->GetValueAndLen(&value_len);
1233
1234 // Parse each space-separated tag.
1235 while (value_len > 0) {
1236 const char* tag = value;
1237 size_t tag_len = value_len;
1238 const char* p = my_strchr(tag, ' ');
1239 if (p) {
1240 tag_len = static_cast<size_t>(p - tag);
1241 value += tag_len + 1;
1242 value_len -= tag_len + 1;
1243 } else {
1244 tag_len = strlen(tag);
1245 value_len = 0;
1246 }
1247 for (const CpuFeaturesEntry& entry : cpu_features_entries) {
1248 if (tag_len == strlen(entry.tag) &&
1249 !memcmp(tag, entry.tag, tag_len)) {
1250 sys_info->cpu.arm_cpu_info.elf_hwcaps |= entry.hwcaps;
1251 break;
1252 }
1253 }
1254 }
1255 }
1256 }
1257 sys_close(fd);
1258 }
1259
1260 return true;
1261 }
1262 #elif defined(__riscv)
1263 bool WriteCPUInformation(MDRawSystemInfo* sys_info) {
1264 // processor_architecture should always be set, do this first
1265 # if __riscv_xlen == 32
1266 sys_info->processor_architecture = MD_CPU_ARCHITECTURE_RISCV;
1267 # elif __riscv_xlen == 64
1268 sys_info->processor_architecture = MD_CPU_ARCHITECTURE_RISCV64;
1269 # else
1270 # error "Unexpected __riscv_xlen"
1271 # endif
1272
1273 // /proc/cpuinfo is not readable under various sandboxed environments
1274 // (e.g. Android services with the android:isolatedProcess attribute),
1275 // so prepare for this by setting default values now, which will be
1276 // returned when this happens.
1277 //
1278 // Note: Bogus values are used to distinguish between failures (to
1279 // read /sys and /proc files) and really badly configured kernels.
1280 sys_info->number_of_processors = 0;
1281 sys_info->processor_level = 0U;
1282 sys_info->processor_revision = 42;
1283 sys_info->cpu.other_cpu_info.processor_features[0] = 0;
1284 sys_info->cpu.other_cpu_info.processor_features[1] = 0;
1285
1286 // Counting the number of CPUs involves parsing two sysfs files,
1287 // because the content of /proc/cpuinfo will only mirror the number
1288 // of 'online' cores, and thus will vary with time.
1289 // See http://www.kernel.org/doc/Documentation/cputopology.txt
1290 {
1291 CpuSet cpus_present;
1292 CpuSet cpus_possible;
1293
1294 int fd = sys_open("/sys/devices/system/cpu/present",
1295 O_RDONLY | O_CLOEXEC, 0);
1296 if (fd >= 0) {
1297 cpus_present.ParseSysFile(fd);
1298 sys_close(fd);
1299
1300 fd = sys_open("/sys/devices/system/cpu/possible",
1301 O_RDONLY | O_CLOEXEC, 0);
1302 if (fd >= 0) {
1303 cpus_possible.ParseSysFile(fd);
1304 sys_close(fd);
1305
1306 cpus_present.IntersectWith(cpus_possible);
1307 int cpu_count = std::min(255, cpus_present.GetCount());
1308 sys_info->number_of_processors = static_cast<uint8_t>(cpu_count);
1309 }
1310 }
1311 }
1312
1313 return true;
1314 }
1315 #else
1316 # error "Unsupported CPU"
1317 #endif
1318
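// Read the contents of |filename| into the minidump and return the written
// data's location in |result|.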
1319 bool WriteFile(MDLocationDescriptor* result, const char* filename) {
1320 const int fd = sys_open(filename, O_RDONLY, 0);
1321 if (fd < 0)
1322 return false;
1323
1324 // We can't stat the files because several of the files that we want to
1325 // read are kernel seqfiles, which always have a length of zero. So we have
1326 // to read as much as we can into a buffer.
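// kBufSize is chosen so that each Buffers node below (next pointer, length
// and data) is roughly one kilobyte.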
1327 static const unsigned kBufSize = 1024 - 2*sizeof(void*);
1328 struct Buffers {
1329 Buffers* next;
1330 size_t len;
1331 uint8_t data[kBufSize];
1332 }* buffers = reinterpret_cast<Buffers*>(Alloc(sizeof(Buffers)));
1333 buffers->next = NULL;
1334 buffers->len = 0;
1335
1336 size_t total = 0;
1337 for (Buffers* bufptr = buffers;;) {
1338 ssize_t r;
1339 do {
1340 r = sys_read(fd, &bufptr->data[bufptr->len], kBufSize - bufptr->len);
1341 } while (r == -1 && errno == EINTR);
1342
1343 if (r < 1)
1344 break;
1345
1346 total += r;
1347 bufptr->len += r;
1348 if (bufptr->len == kBufSize) {
1349 bufptr->next = reinterpret_cast<Buffers*>(Alloc(sizeof(Buffers)));
1350 bufptr = bufptr->next;
1351 bufptr->next = NULL;
1352 bufptr->len = 0;
1353 }
1354 }
1355 sys_close(fd);
1356
1357 if (!total)
1358 return false;
1359
1360 UntypedMDRVA memory(&minidump_writer_);
1361 if (!memory.Allocate(total))
1362 return false;
1363 for (MDRVA pos = memory.position(); buffers; buffers = buffers->next) {
1364 // Check for special case of a zero-length buffer. This should only
1365 // occur if a file's size happens to be a multiple of the buffer's
1366 // size, in which case the final sys_read() will have resulted in
1367 // zero bytes being read after the final buffer was just allocated.
1368 if (buffers->len == 0) {
1369 // This can only occur with final buffer.
1370 assert(buffers->next == NULL);
1371 continue;
1372 }
1373 memory.Copy(pos, &buffers->data, buffers->len);
1374 pos += buffers->len;
1375 }
1376 *result = memory.location();
1377 return true;
1378 }
1379
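// Fill in the platform id and write a version string assembled from the
// uname() fields (sysname, release, version, machine).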
1380 bool WriteOSInformation(MDRawSystemInfo* sys_info) {
1381 #if defined(__ANDROID__)
1382 sys_info->platform_id = MD_OS_ANDROID;
1383 #else
1384 sys_info->platform_id = MD_OS_LINUX;
1385 #endif
1386
1387 struct utsname uts;
1388 if (uname(&uts))
1389 return false;
1390
1391 static const size_t buf_len = 512;
1392 char buf[buf_len] = {0};
1393 size_t space_left = buf_len - 1;
1394 const char* info_table[] = {
1395 uts.sysname,
1396 uts.release,
1397 uts.version,
1398 uts.machine,
1399 NULL
1400 };
1401 bool first_item = true;
1402 for (const char** cur_info = info_table; *cur_info; cur_info++) {
1403 static const char separator[] = " ";
1404 size_t separator_len = sizeof(separator) - 1;
1405 size_t info_len = my_strlen(*cur_info);
1406 if (info_len == 0)
1407 continue;
1408
1409 if (space_left < info_len + (first_item ? 0 : separator_len))
1410 break;
1411
1412 if (!first_item) {
1413 my_strlcat(buf, separator, sizeof(buf));
1414 space_left -= separator_len;
1415 }
1416
1417 first_item = false;
1418 my_strlcat(buf, *cur_info, sizeof(buf));
1419 space_left -= info_len;
1420 }
1421
1422 MDLocationDescriptor location;
1423 if (!minidump_writer_.WriteString(buf, 0, &location))
1424 return false;
1425 sys_info->csd_version_rva = location.rva;
1426
1427 return true;
1428 }
1429
1430 bool WriteProcFile(MDLocationDescriptor* result, pid_t pid,
1431 const char* filename) {
1432 char buf[NAME_MAX];
1433 if (!dumper_->BuildProcPath(buf, pid, filename))
1434 return false;
1435 return WriteFile(result, buf);
1436 }
1437
1438 // Only one of the two member variables below should be set to a valid value.
1439 const int fd_; // File descriptor where the minidump should be written.
1440 const char* path_; // Path to the file where the minidump should be written.
1441
1442 const ucontext_t* const ucontext_; // also from the signal handler
1443 #if GOOGLE_BREAKPAD_CRASH_CONTEXT_HAS_FLOAT_STATE
1444 const google_breakpad::fpstate_t* const float_state_; // ditto
1445 #endif
1446 LinuxDumper* dumper_;
1447 MinidumpFileWriter minidump_writer_;
1448 off_t minidump_size_limit_;
1449 MDLocationDescriptor crashing_thread_context_;
1450 // Blocks of memory written to the dump. These are all currently
1451 // written while writing the thread list stream, but saved here
1452 // so a memory list stream can be written afterwards.
1453 wasteful_vector<MDMemoryDescriptor> memory_blocks_;
1454 // Additional information about some mappings provided by the caller.
1455 const MappingList& mapping_list_;
1456 // Additional memory regions to be included in the dump,
1457 // provided by the caller.
1458 const AppMemoryList& app_memory_list_;
1459 // If set, skip recording any threads that do not reference the
1460 // mapping containing principal_mapping_address_.
1461 bool skip_stacks_if_mapping_unreferenced_;
1462 uintptr_t principal_mapping_address_;
1463 const MappingInfo* principal_mapping_;
1464 // If true, apply stack sanitization to stored stack data.
1465 bool sanitize_stacks_;
1466 };
1467
1468
1469 bool WriteMinidumpImpl(const char* minidump_path,
1470 int minidump_fd,
1471 off_t minidump_size_limit,
1472 pid_t crashing_process,
1473 const void* blob, size_t blob_size,
1474 const MappingList& mappings,
1475 const AppMemoryList& appmem,
1476 bool skip_stacks_if_mapping_unreferenced,
1477 uintptr_t principal_mapping_address,
1478 bool sanitize_stacks) {
1479 LinuxPtraceDumper dumper(crashing_process);
1480 const ExceptionHandler::CrashContext* context = NULL;
1481 if (blob) {
1482 if (blob_size != sizeof(ExceptionHandler::CrashContext))
1483 return false;
1484 context = reinterpret_cast<const ExceptionHandler::CrashContext*>(blob);
1485 dumper.SetCrashInfoFromSigInfo(context->siginfo);
1486 dumper.set_crash_thread(context->tid);
1487 }
1488 MinidumpWriter writer(minidump_path, minidump_fd, context, mappings,
1489 appmem, skip_stacks_if_mapping_unreferenced,
1490 principal_mapping_address, sanitize_stacks, &dumper);
1491 // Set desired limit for file size of minidump (-1 means no limit).
1492 writer.set_minidump_size_limit(minidump_size_limit);
1493 if (!writer.Init())
1494 return false;
1495 return writer.Dump();
1496 }
1497
1498 } // namespace
1499
1500 namespace google_breakpad {
1501
1502 bool WriteMinidump(const char* minidump_path, pid_t crashing_process,
1503 const void* blob, size_t blob_size,
1504 bool skip_stacks_if_mapping_unreferenced,
1505 uintptr_t principal_mapping_address,
1506 bool sanitize_stacks) {
1507 return WriteMinidumpImpl(minidump_path, -1, -1,
1508 crashing_process, blob, blob_size,
1509 MappingList(), AppMemoryList(),
1510 skip_stacks_if_mapping_unreferenced,
1511 principal_mapping_address,
1512 sanitize_stacks);
1513 }
1514
1515 bool WriteMinidump(int minidump_fd, pid_t crashing_process,
1516 const void* blob, size_t blob_size,
1517 bool skip_stacks_if_mapping_unreferenced,
1518 uintptr_t principal_mapping_address,
1519 bool sanitize_stacks) {
1520 return WriteMinidumpImpl(NULL, minidump_fd, -1,
1521 crashing_process, blob, blob_size,
1522 MappingList(), AppMemoryList(),
1523 skip_stacks_if_mapping_unreferenced,
1524 principal_mapping_address,
1525 sanitize_stacks);
1526 }
1527
1528 bool WriteMinidump(const char* minidump_path, pid_t process,
1529 pid_t process_blamed_thread) {
1530 LinuxPtraceDumper dumper(process);
1531 // MinidumpWriter will set crash address
1532 dumper.set_crash_signal(MD_EXCEPTION_CODE_LIN_DUMP_REQUESTED);
1533 dumper.set_crash_thread(process_blamed_thread);
1534 MappingList mapping_list;
1535 AppMemoryList app_memory_list;
1536 MinidumpWriter writer(minidump_path, -1, NULL, mapping_list,
1537 app_memory_list, false, 0, false, &dumper);
1538 if (!writer.Init())
1539 return false;
1540 return writer.Dump();
1541 }
1542
1543 bool WriteMinidump(const char* minidump_path, pid_t crashing_process,
1544 const void* blob, size_t blob_size,
1545 const MappingList& mappings,
1546 const AppMemoryList& appmem,
1547 bool skip_stacks_if_mapping_unreferenced,
1548 uintptr_t principal_mapping_address,
1549 bool sanitize_stacks) {
1550 return WriteMinidumpImpl(minidump_path, -1, -1, crashing_process,
1551 blob, blob_size,
1552 mappings, appmem,
1553 skip_stacks_if_mapping_unreferenced,
1554 principal_mapping_address,
1555 sanitize_stacks);
1556 }
1557
1558 bool WriteMinidump(int minidump_fd, pid_t crashing_process,
1559 const void* blob, size_t blob_size,
1560 const MappingList& mappings,
1561 const AppMemoryList& appmem,
1562 bool skip_stacks_if_mapping_unreferenced,
1563 uintptr_t principal_mapping_address,
1564 bool sanitize_stacks) {
1565 return WriteMinidumpImpl(NULL, minidump_fd, -1, crashing_process,
1566 blob, blob_size,
1567 mappings, appmem,
1568 skip_stacks_if_mapping_unreferenced,
1569 principal_mapping_address,
1570 sanitize_stacks);
1571 }
1572
1573 bool WriteMinidump(const char* minidump_path, off_t minidump_size_limit,
1574 pid_t crashing_process,
1575 const void* blob, size_t blob_size,
1576 const MappingList& mappings,
1577 const AppMemoryList& appmem,
1578 bool skip_stacks_if_mapping_unreferenced,
1579 uintptr_t principal_mapping_address,
1580 bool sanitize_stacks) {
1581 return WriteMinidumpImpl(minidump_path, -1, minidump_size_limit,
1582 crashing_process, blob, blob_size,
1583 mappings, appmem,
1584 skip_stacks_if_mapping_unreferenced,
1585 principal_mapping_address,
1586 sanitize_stacks);
1587 }
1588
1589 bool WriteMinidump(int minidump_fd, off_t minidump_size_limit,
1590 pid_t crashing_process,
1591 const void* blob, size_t blob_size,
1592 const MappingList& mappings,
1593 const AppMemoryList& appmem,
1594 bool skip_stacks_if_mapping_unreferenced,
1595 uintptr_t principal_mapping_address,
1596 bool sanitize_stacks) {
1597 return WriteMinidumpImpl(NULL, minidump_fd, minidump_size_limit,
1598 crashing_process, blob, blob_size,
1599 mappings, appmem,
1600 skip_stacks_if_mapping_unreferenced,
1601 principal_mapping_address,
1602 sanitize_stacks);
1603 }
1604
1605 bool WriteMinidump(const char* filename,
1606 const MappingList& mappings,
1607 const AppMemoryList& appmem,
1608 LinuxDumper* dumper) {
1609 MinidumpWriter writer(filename, -1, NULL, mappings, appmem,
1610 false, 0, false, dumper);
1611 if (!writer.Init())
1612 return false;
1613 return writer.Dump();
1614 }
1615
1616 } // namespace google_breakpad
1617