2 * Copyright (c) 2013 The Native Client Authors. All rights reserved.
3 * Use of this source code is governed by a BSD-style license that can be
4 * found in the LICENSE file.
10 # define DYNAMIC_LOADING_SUPPORT 1
12 # define DYNAMIC_LOADING_SUPPORT 0
14 #if DYNAMIC_LOADING_SUPPORT
28 #include "breakpad/src/google_breakpad/common/minidump_format.h"
29 #if !DYNAMIC_LOADING_SUPPORT
30 # include "native_client/src/include/elf_constants.h"
32 #include "native_client/src/include/nacl/nacl_exception.h"
33 #include "native_client/src/include/nacl/nacl_minidump.h"
34 #include "native_client/src/untrusted/minidump_generator/build_id.h"
37 extern char __executable_start[]; // Start of code segment
38 extern char __etext[]; // End of code segment
40 #if defined(__GLIBC__)
41 // Variable defined by ld.so, used as a workaround for
42 // https://code.google.com/p/nativeclient/issues/detail?id=3431.
43 extern void *__libc_stack_end;
46 class MinidumpAllocator;
48 // Restrict how much of the stack we dump to reduce upload size and to
49 // avoid dynamic allocation.
50 static const size_t kLimitStackDumpSize = 512 * 1024;
// Budget for all non-stack minidump content (headers, directory,
// register contexts, module list, strings, etc.).
52 static const size_t kLimitNonStackSize = 64 * 1024;
54 // The crash reporter is expected to be used in a situation where the
55 // current process is damaged or out of memory, so it avoids dynamic
56 // memory allocation and allocates a fixed-size buffer of the
57 // following size at startup.
58 static const size_t kMinidumpBufferSize =
59 kLimitStackDumpSize + kLimitNonStackSize;
// Module name reported in the minidump; overridable via
// nacl_minidump_set_module_name().
61 static const char *g_module_name = "main.nexe";
// User callback invoked with the finished minidump (see CrashHandler).
62 static nacl_minidump_callback_t g_callback_func;
// Preallocated writer used while handling a crash.
63 static MinidumpAllocator *g_minidump_writer;
// Re-entrancy guard, set with __sync_lock_test_and_set in CrashHandler.
64 static int g_handling_exception = 0;
66 #if !DYNAMIC_LOADING_SUPPORT
// Build ID of the single statically-linked nexe (static-linking case only;
// with dynamic loading, IDs come from dl_iterate_phdr instead).
67 static MDGUID g_module_build_id;
68 static int g_module_build_id_set;
// Bump allocator over one fixed-size mmap'd region.  All memory is
// reserved up front so that no heap allocation happens while handling
// a crash (the process heap may be corrupted or exhausted).
71 class MinidumpAllocator {
// Maps |size| bytes of anonymous read/write memory.  On failure the
// buffer stays NULL and an error is printed via perror().
77   explicit MinidumpAllocator(uint32_t size) :
78       buf_(NULL), buf_size_(0), offset_(0) {
79     void *mapping = mmap(NULL, size, PROT_READ | PROT_WRITE,
80                          MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
81     if (mapping == MAP_FAILED) {
82       perror("minidump: Failed to allocate memory");
85     buf_ = (char *) mapping;
89   ~MinidumpAllocator() {
91     int result = munmap(buf_, buf_size_);
// Carves |size| bytes out of the buffer.  On success, *ptr points at
// the zeroed space and *position is its offset within the buffer
// (i.e. its RVA within the minidump being built).  Fails (without
// allocating) once the fixed buffer would be exhausted.
96   bool AllocateSpace(size_t size, char **ptr, uint32_t *position) {
// NOTE(review): ">=" rejects an allocation that would exactly fill the
// buffer (wastes one byte), and "offset_ + size" could wrap for huge
// |size| — presumably acceptable given the small fixed budgets; confirm.
97     if (offset_ + size >= buf_size_)
100     *ptr = buf_ + offset_;
102     memset(*ptr, 0, size);
// malloc-style wrapper around AllocateSpace(); returns NULL on failure.
106   void *Alloc(size_t size) {
109     if (!AllocateSpace(size, &ptr, &position))
// strdup() into the fixed buffer; returns NULL when out of space.
114   char *StrDup(const char *str) {
117     size = strlen(str) + 1;
118     str_copy = reinterpret_cast<char *>(Alloc(size));
119     if (str_copy == NULL)
121     memcpy(str_copy, str, size);
// Raw buffer and the number of bytes written so far.
125   char *data() { return buf_; }
126   size_t size() { return offset_; }
129 #if DYNAMIC_LOADING_SUPPORT
131 // Limit the number of modules to capture and their name length.
132 static const size_t kLimitModuleListSize = 64 * 1024;
// Singly-linked list node describing one loaded module, built by
// CaptureModulesCallback() inside a MinidumpAllocator arena.
135   struct ModuleEntry *next;
// Address and size of the module's executable PT_LOAD segment.
138   uintptr_t code_segment_start;
139   uintptr_t code_segment_size;
// Latest module-list snapshot (swapped atomically) and a preallocated
// spare arena used if we must capture modules during crash handling.
142 static MinidumpAllocator *g_module_snapshot;
143 static MinidumpAllocator *g_module_snapshot_workspace;
147 // TypedMDRVA represents a minidump object chunk. This interface is
148 // based on the TypedMDRVA class in the Breakpad codebase. Breakpad's
149 // implementation writes directly to a file, whereas this
150 // implementation constructs the minidump file in memory.
151 template<typename MDType>
// Writer whose arena backs this chunk; not owned.
153   MinidumpAllocator *writer_;
159   explicit TypedMDRVA(MinidumpAllocator *writer) :
165   // Allocates space for MDType.
167     return AllocateArray(1);
170   // Allocates an array of |count| elements of MDType.
171   bool AllocateArray(size_t count) {
172     size_ = sizeof(MDType) * count;
173     return writer_->AllocateSpace(size_, &ptr_, &position_);
176   // Allocates an array of |count| elements of |size| after object of MDType.
177   bool AllocateObjectAndArray(size_t count, size_t length) {
178     size_ = sizeof(MDType) + count * length;
179     return writer_->AllocateSpace(size_, &ptr_, &position_);
182   // Copy |item| to |index|. Must have been allocated using AllocateArray().
183   void CopyIndexAfterObject(unsigned int index, void *src, size_t length) {
184     size_t offset = sizeof(MDType) + index * length;
185     assert(offset + length <= size_);
186     memcpy(ptr_ + offset, src, length);
// Typed view of the allocated chunk.
189   MDType *get() { return (MDType *) ptr_; }
// Offset (RVA) of this chunk within the minidump buffer.
191   uint32_t position() { return position_; }
// Size+RVA pair in the form minidump directory entries expect.
193   MDLocationDescriptor location() {
194     MDLocationDescriptor location = { size_, position_ };
// Translates the NaCl exception register state into the Breakpad
// per-architecture MDRawContext record for |thread|, allocating the
// record from |minidump_writer| and storing its location in
// thread->thread_context.
200 static void ConvertRegisters(MinidumpAllocator *minidump_writer,
201                              struct NaClExceptionContext *context,
202                              MDRawThread *thread) {
203   NaClExceptionPortableContext *pcontext =
204     nacl_exception_context_get_portable(context);
// Helper: copy one like-named register from src_regs into the minidump
// context record.
205 #define COPY_REG(REG) regs.get()->REG = src_regs->REG
206   switch (context->arch) {
// x86-32: copy general-purpose registers plus eip/eflags/esp.
208       struct NaClUserRegisterStateX8632 *src_regs =
209           (struct NaClUserRegisterStateX8632 *) &context->regs;
210       TypedMDRVA<MDRawContextX86> regs(minidump_writer);
211       if (!regs.Allocate())
213       thread->thread_context = regs.location();
214       // TODO(mseaborn): Report x87/SSE registers too.
215       regs.get()->context_flags =
216           MD_CONTEXT_X86_CONTROL | MD_CONTEXT_X86_INTEGER;
224       regs.get()->eip = src_regs->prog_ctr;
225       regs.get()->eflags = src_regs->flags;
226       regs.get()->esp = src_regs->stack_ptr;
// x86-64: note stack/frame/program-counter values come from the
// portable context rather than the raw register block.
230       struct NaClUserRegisterStateX8664 *src_regs =
231           (struct NaClUserRegisterStateX8664 *) &context->regs;
232       TypedMDRVA<MDRawContextAMD64> regs(minidump_writer);
233       if (!regs.Allocate())
235       thread->thread_context = regs.location();
236       // TODO(mseaborn): Report x87/SSE registers too.
237       regs.get()->context_flags =
238           MD_CONTEXT_AMD64_CONTROL | MD_CONTEXT_AMD64_INTEGER;
239       regs.get()->eflags = src_regs->flags;
244       regs.get()->rsp = pcontext->stack_ptr;
245       regs.get()->rbp = pcontext->frame_ptr;
256       regs.get()->rip = pcontext->prog_ctr;
// ARM: r0-r15 are contiguous in the register state, so copy them as an
// indexed array starting at &src_regs->r0.
260       struct NaClUserRegisterStateARM *src_regs =
261           (struct NaClUserRegisterStateARM *) &context->regs;
262       TypedMDRVA<MDRawContextARM> regs(minidump_writer);
263       if (!regs.Allocate())
265       thread->thread_context = regs.location();
266       for (int regnum = 0; regnum < 16; regnum++) {
267         regs.get()->iregs[regnum] = ((uint32_t *) &src_regs->r0)[regnum];
269       regs.get()->cpsr = src_regs->cpsr;
273       // Architecture not recognized. Dump the register state anyway.
274       // Maybe we should do this on all architectures, and Breakpad
275       // should be adapted to read NaCl's portable container format
276       // for register state so that the client code is
277       // architecture-neutral.
278       TypedMDRVA<uint8_t> regs(minidump_writer);
279       if (!regs.AllocateArray(context->size))
281       thread->thread_context = regs.location();
// NOTE(review): "&context" takes the address of the local pointer
// variable, so this copies context->size bytes of stack starting at
// the pointer itself; it looks like it should be "context" to copy
// the NaClExceptionContext struct — confirm and fix upstream.
282       memcpy(regs.get(), &context, context->size);
// Copies |size| bytes starting at |start| into the minidump buffer and
// returns a descriptor referencing the copy.  On allocation failure the
// descriptor presumably stays zeroed (only the success path is set here).
289 static MDMemoryDescriptor SnapshotMemory(MinidumpAllocator *minidump_writer,
290                                          uintptr_t start, size_t size) {
291   TypedMDRVA<uint8_t> mem_copy(minidump_writer);
292   MDMemoryDescriptor desc = {0};
293   if (mem_copy.AllocateArray(size)) {
294     memcpy(mem_copy.get(), (void *) start, size);
296     desc.start_of_memory_range = start;
297     desc.memory = mem_copy.location();
// Determines the upper end (highest address) of the current thread's
// stack, used to bound how much stack to dump.  Returns true on success.
302 static bool GetStackEnd(void **stack_end) {
303 #if defined(__GLIBC__)
// glibc: derive the stack end from pthread attributes (base + size).
307   if (pthread_getattr_np(pthread_self(), &attr) == 0 &&
308       pthread_attr_getstack(&attr, &stack_base, &stack_size) == 0) {
309     *stack_end = (void *) ((char *) stack_base + stack_size);
310     pthread_attr_destroy(&attr);
313   // pthread_getattr_np() currently fails on the initial thread. As a
314   // workaround, if we reach here, assume we are on the initial thread
315   // and get the initial thread's stack end as recorded by glibc.
316   // See https://code.google.com/p/nativeclient/issues/detail?id=3431
317   *stack_end = __libc_stack_end;
// Non-glibc (newlib) path: ask the NaCl pthread library directly.
320   return pthread_get_stack_end_np(pthread_self(), stack_end) == 0;
// Emits the MD_EXCEPTION_STREAM directory entry, pointing its thread
// context at the register record produced by WriteThreadList().
324 static void WriteExceptionList(MinidumpAllocator *minidump_writer,
325                                MDRawDirectory *dirent,
326                                MDLocationDescriptor thread_context) {
327   TypedMDRVA<MDRawExceptionStream> exception(minidump_writer);
328   if (!exception.Allocate())
331   // TODO(bradnelson): Specify a particular thread once we gather more than the
333   exception.get()->thread_id = 0;
334   // TODO(bradnelson): Provide information on the type of exception once we
335   // have it. For now report everything as SIGSEGV.
336   exception.get()->exception_record.exception_code =
337       MD_EXCEPTION_CODE_LIN_SIGSEGV;
338   // TODO(bradnelson): Provide the address of the fault, once we have it.
339   exception.get()->exception_record.exception_address = 0;
340   exception.get()->thread_context = thread_context;
342   dirent->stream_type = MD_EXCEPTION_STREAM;
343   dirent->location = exception.location();
// Emits the MD_THREAD_LIST_STREAM: the crashing thread's registers and
// a bounded snapshot of its stack.  The register record's location is
// returned through |thread_context_out| for use by WriteExceptionList().
346 static void WriteThreadList(MinidumpAllocator *minidump_writer,
347                             MDRawDirectory *dirent,
348                             struct NaClExceptionContext *context,
349                             MDLocationDescriptor *thread_context_out) {
350   // This records only the thread that crashed.
351   // TODO(mseaborn): Record other threads too. This will require NaCl
352   // to provide an interface for suspending threads.
353   TypedMDRVA<uint32_t> list(minidump_writer);
// Layout is a uint32_t thread count followed by MDRawThread entries.
355   if (!list.AllocateObjectAndArray(num_threads, sizeof(MDRawThread)))
357   *list.get() = num_threads;
359   MDRawThread thread = {0};
360   ConvertRegisters(minidump_writer, context, &thread);
361   *thread_context_out = thread.thread_context;
363   // Record the stack contents.
364   NaClExceptionPortableContext *pcontext =
365     nacl_exception_context_get_portable(context);
366   uintptr_t stack_start = pcontext->stack_ptr;
367   if (context->arch == EM_X86_64) {
368     // Include the x86-64 red zone too to capture local variables.
// Dump from the crash-time stack pointer up to the thread's stack end,
// capped at kLimitStackDumpSize to bound the minidump size.
372   if (GetStackEnd(&stack_end) && stack_start <= (uintptr_t) stack_end) {
373     size_t stack_size = (uintptr_t) stack_end - stack_start;
374     stack_size = std::min(stack_size, kLimitStackDumpSize);
375     thread.stack = SnapshotMemory(minidump_writer, stack_start, stack_size);
378   list.CopyIndexAfterObject(0, &thread, sizeof(thread));
380   dirent->stream_type = MD_THREAD_LIST_STREAM;
381   dirent->location = list.location();
// Maps an ELF e_machine value onto the corresponding minidump CPU
// architecture constant; unknown machines map to
// MD_CPU_ARCHITECTURE_UNKNOWN rather than failing.
384 static int MinidumpArchFromElfMachine(int e_machine) {
386     case EM_386: return MD_CPU_ARCHITECTURE_X86;
387     case EM_X86_64: return MD_CPU_ARCHITECTURE_AMD64;
388     case EM_ARM: return MD_CPU_ARCHITECTURE_ARM;
389     case EM_MIPS: return MD_CPU_ARCHITECTURE_MIPS;
390     default: return MD_CPU_ARCHITECTURE_UNKNOWN;
// Writes |string| as a minidump MDString (uint32_t byte length followed
// by UTF-16 code units) and returns its RVA.  Each char is widened
// individually, so this is only correct for ASCII input.
394 static uint32_t WriteString(MinidumpAllocator *minidump_writer,
395                             const char *string) {
396   int string_length = strlen(string);
397   TypedMDRVA<uint32_t> obj(minidump_writer);
// "+ 1" reserves space for the UTF-16 NUL terminator; the stored length
// excludes it, per the minidump MDString convention.
398   if (!obj.AllocateObjectAndArray(string_length + 1, sizeof(uint16_t)))
400   *obj.get() = string_length * sizeof(uint16_t);
401   for (int i = 0; i < string_length + 1; ++i) {
402     ((MDString *) obj.get())->buffer[i] = string[i];
404   return obj.position();
// Emits the MD_SYSTEM_INFO_STREAM: CPU architecture (derived from the
// exception context's ELF arch) and the NaCl platform ID.
407 static void WriteSystemInfo(MinidumpAllocator *minidump_writer,
408                             MDRawDirectory *dirent,
409                             struct NaClExceptionContext *context) {
410   TypedMDRVA<MDRawSystemInfo> sysinfo(minidump_writer);
411   if (!sysinfo.Allocate())
413   sysinfo.get()->processor_architecture =
414       MinidumpArchFromElfMachine(context->arch);
415   sysinfo.get()->platform_id = MD_OS_NACL;
416   sysinfo.get()->csd_version_rva = WriteString(minidump_writer, "nacl");
417   dirent->stream_type = MD_SYSTEM_INFO_STREAM;
418   dirent->location = sysinfo.location();
// Emits a minimal MD_MISC_INFO_STREAM containing only its own size,
// purely so tools that expect the stream don't complain.
421 static void WriteMiscInfo(MinidumpAllocator *minidump_writer,
422                           MDRawDirectory *dirent) {
423   // Write empty record to keep minidump_dump happy.
424   TypedMDRVA<MDRawMiscInfo> info(minidump_writer);
425   if (!info.Allocate())
427   info.get()->size_of_info = sizeof(MDRawMiscInfo);
428   dirent->stream_type = MD_MISC_INFO_STREAM;
429   dirent->location = info.location();
432 #if DYNAMIC_LOADING_SUPPORT
// dl_iterate_phdr() callback: records one loaded module into the arena
// passed via |data|, prepending it to the linked list whose head pointer
// lives at the start of the arena (see CaptureModules).
433 static int CaptureModulesCallback(
434     struct dl_phdr_info *info, size_t size, void *data) {
435   MinidumpAllocator *modules_arena = reinterpret_cast<MinidumpAllocator *>(
// The list head pointer was allocated first, so it sits at data().
437   ModuleEntry **modules = reinterpret_cast<ModuleEntry **>(
438       modules_arena->data());
440   ModuleEntry *module = reinterpret_cast<ModuleEntry *>(
441       modules_arena->Alloc(sizeof(ModuleEntry)));
442   if (module == NULL) {
// An empty dlpi_name conventionally denotes the main executable, so
// substitute the configured module name for it.
446   if (strlen(info->dlpi_name) > 0) {
447     module->name = modules_arena->StrDup(info->dlpi_name);
449     module->name = modules_arena->StrDup(g_module_name);
451   if (module->name == NULL)
454   // Blank these out in case we don't find values for them.
455   module->code_segment_start = 0;
456   module->code_segment_size = 0;
457   memset(&module->build_id, 0, sizeof(module->build_id));
459   bool found_code = false;
460   bool found_build_id = false;
// Scan program headers for the build ID note and the first executable
// PT_LOAD segment.
462   for (int i = 0; i < info->dlpi_phnum; ++i) {
463     if (!found_build_id && info->dlpi_phdr[i].p_type == PT_NOTE) {
464       const char *data_ptr;
466       const void *addr = reinterpret_cast<const void *>(
467           info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
468       if (!nacl_get_build_id_from_notes(
469               addr, info->dlpi_phdr[i].p_memsz, &data_ptr, &size)) {
472       // Truncate the ID if necessary. The minidump format uses a 16
473       // byte ID, whereas ELF build IDs are typically 20-byte SHA1
475       memcpy(&module->build_id, data_ptr,
476              std::min(size, sizeof(module->build_id)));
477       found_build_id = true;
478     } else if (!found_code &&
479                info->dlpi_phdr[i].p_type == PT_LOAD &&
480                (info->dlpi_phdr[i].p_flags & PF_X) != 0) {
481       module->code_segment_start = info->dlpi_addr + info->dlpi_phdr[i].p_vaddr;
482       module->code_segment_size = info->dlpi_phdr[i].p_memsz;
487   // The entry for runnable-ld.so doesn't report a PT_LOAD segment.
488   // Don't emit it, as breakpad is confused by zero length modules.
// Link the new entry at the head of the list.
490   module->next = *modules;
// Walks the loaded modules with dl_iterate_phdr() and builds the
// ModuleEntry list inside |modules_arena|.  The list head pointer is
// allocated first so consumers can find it at modules_arena->data().
497 static void CaptureModules(MinidumpAllocator *modules_arena) {
498   // Allocate space for the pointer to the head of the module list
499   // so that it will be at modules_arena.data().
500   ModuleEntry **head = reinterpret_cast<ModuleEntry **>(
501       modules_arena->Alloc(sizeof(ModuleEntry *)));
504   dl_iterate_phdr(CaptureModulesCallback, modules_arena);
506   // TODO(bradnelson): Convert this to a test once we have the plumbing to
507   // post-process the minidumps in a test.
508   // There should be at least one module.
509   assert(*head != NULL);
// Emits the MD_MODULE_LIST_STREAM.  With dynamic loading support it
// walks the captured ModuleEntry list (taking a pre-made snapshot if
// one exists, else capturing into the preallocated workspace arena);
// otherwise it emits a single entry for the statically-linked nexe,
// with code bounds taken from __executable_start/__etext.
513 static void WriteModuleList(MinidumpAllocator *minidump_writer,
514                             MDRawDirectory *dirent) {
515   // TODO(mseaborn): Report the IRT's build ID here too, once the IRT
516   // provides an interface for querying it.
517   TypedMDRVA<uint32_t> module_list(minidump_writer);
519 #if DYNAMIC_LOADING_SUPPORT
// Atomically claim the snapshot so a concurrent clear/replace can't
// free it out from under us.
520   MinidumpAllocator *modules_arena = __sync_lock_test_and_set(
521       &g_module_snapshot, NULL);
522   if (modules_arena == NULL) {
523     modules_arena = g_module_snapshot_workspace;
524     CaptureModules(modules_arena);
526   // NOTE: Consciously leaking modules_arena. We are crashing and about to
527   // shut down anyhow. Attempting to free it can only produce more volatility.
528   ModuleEntry **modules = reinterpret_cast<ModuleEntry **>(
529       modules_arena->data());
// First pass: count modules to size the stream's array.
530   int module_count = 0;
531   for (ModuleEntry *module = *modules; module; module = module->next) {
// Static-linking case: exactly one module (the nexe itself).
535   int module_count = 1;
// Stream layout: uint32_t count followed by MD_MODULE_SIZE records.
537   if (!module_list.AllocateObjectAndArray(module_count, MD_MODULE_SIZE)) {
540   *module_list.get() = module_count;
542 #if DYNAMIC_LOADING_SUPPORT
// Second pass: emit a CodeView (PDB70) record and MDRawModule per entry.
544   for (ModuleEntry *module = *modules; module; module = module->next) {
545     TypedMDRVA<MDCVInfoPDB70> cv(minidump_writer);
546     size_t name_size = strlen(module->name) + 1;
547     if (!cv.AllocateObjectAndArray(name_size, sizeof(char))) {
550     cv.get()->cv_signature = MD_CVINFOPDB70_SIGNATURE;
551     cv.get()->signature = module->build_id;
552     memcpy(cv.get()->pdb_file_name, module->name, name_size);
554     MDRawModule dst_module = {0};
555     dst_module.base_of_image = module->code_segment_start;
556     dst_module.size_of_image = module->code_segment_size;
557     dst_module.module_name_rva = WriteString(minidump_writer, module->name);
558     dst_module.cv_record = cv.location();
559     module_list.CopyIndexAfterObject(index++, &dst_module, MD_MODULE_SIZE);
// Static-linking path: one record using g_module_name/g_module_build_id.
562   TypedMDRVA<MDCVInfoPDB70> cv(minidump_writer);
563   size_t name_size = strlen(g_module_name) + 1;
564   if (!cv.AllocateObjectAndArray(name_size, sizeof(char))) {
567   cv.get()->cv_signature = MD_CVINFOPDB70_SIGNATURE;
568   cv.get()->signature = g_module_build_id;
569   memcpy(cv.get()->pdb_file_name, g_module_name, name_size);
571   MDRawModule dst_module = {0};
// Code segment bounds come from linker-provided symbols (see extern
// decls at the top of the file).
572   dst_module.base_of_image = (uintptr_t) &__executable_start;
573   dst_module.size_of_image = (uintptr_t) &__etext -
574       (uintptr_t) &__executable_start;
575   dst_module.module_name_rva = WriteString(minidump_writer, g_module_name);
576   dst_module.cv_record = cv.location();
577   module_list.CopyIndexAfterObject(0, &dst_module, MD_MODULE_SIZE);
580   dirent->stream_type = MD_MODULE_LIST_STREAM;
581   dirent->location = module_list.location();
// Emits an empty MD_MEMORY_LIST_STREAM (count of zero) so consumers
// that require the stream's presence still parse the dump.
584 static void WriteMemoryList(MinidumpAllocator *minidump_writer,
585                             MDRawDirectory *dirent) {
586   // TODO(bradnelson): Actually capture memory regions.
587   // Write empty list to keep minidump_dump happy.
588   TypedMDRVA<uint32_t> memory_list(minidump_writer);
589   if (!memory_list.AllocateObjectAndArray(0, sizeof(MDMemoryDescriptor))) {
592   *memory_list.get() = 0;
594   dirent->stream_type = MD_MEMORY_LIST_STREAM;
595   dirent->location = memory_list.location();
// Emits an empty MD_MEMORY_INFO_LIST_STREAM: valid header, zero entries.
598 static void WriteMemoryInfoList(MinidumpAllocator *minidump_writer,
599                                 MDRawDirectory *dirent) {
600   // TODO(bradnelson): Actually capture memory info regions.
601   // Write empty list to keep minidump_dump happy.
602   TypedMDRVA<MDRawMemoryInfoList> memory_info_list(minidump_writer);
603   if (!memory_info_list.AllocateObjectAndArray(
604           0, sizeof(MDRawMemoryInfo))) {
607   memory_info_list.get()->size_of_header = sizeof(MDRawMemoryInfoList);
608   memory_info_list.get()->size_of_entry = sizeof(MDRawMemoryInfo);
609   memory_info_list.get()->number_of_entries = 0;
611   dirent->stream_type = MD_MEMORY_INFO_LIST_STREAM;
612   dirent->location = memory_info_list.location();
// Assembles the whole minidump in |minidump_writer|: header, a
// directory of kNumWriters entries, then one stream per Write* helper.
// kNumWriters must match the number of helper calls below (asserted).
615 static void WriteMinidump(MinidumpAllocator *minidump_writer,
616                           struct NaClExceptionContext *context) {
617   const int kNumWriters = 7;
618   TypedMDRVA<MDRawHeader> header(minidump_writer);
619   TypedMDRVA<MDRawDirectory> dir(minidump_writer);
620   if (!header.Allocate())
622   if (!dir.AllocateArray(kNumWriters))
624   header.get()->signature = MD_HEADER_SIGNATURE;
625   header.get()->version = MD_HEADER_VERSION;
626   header.get()->time_date_stamp = time(NULL);
627   header.get()->stream_count = kNumWriters;
628   header.get()->stream_directory_rva = dir.position();
// The thread list produces the crashing thread's register context,
// which the exception stream then references.
631   MDLocationDescriptor thread_context = {0};
632   WriteThreadList(minidump_writer, &dir.get()[dir_index++], context,
634   WriteExceptionList(minidump_writer, &dir.get()[dir_index++], thread_context);
635   WriteSystemInfo(minidump_writer, &dir.get()[dir_index++], context);
636   WriteMiscInfo(minidump_writer, &dir.get()[dir_index++]);
637   WriteModuleList(minidump_writer, &dir.get()[dir_index++]);
638   WriteMemoryList(minidump_writer, &dir.get()[dir_index++]);
639   WriteMemoryInfoList(minidump_writer, &dir.get()[dir_index++]);
640   assert(dir_index == kNumWriters);
// NaCl exception handler: builds the minidump into the preallocated
// writer and hands the finished buffer to the registered callback.
// Uses only async-signal-ish primitives (write(), preallocated memory)
// because the process state may be corrupted.
643 static void CrashHandler(struct NaClExceptionContext *context) {
644   static const char msg[] = "minidump: Caught crash\n";
// sizeof(msg) - 1 drops the trailing NUL.
645   write(2, msg, sizeof(msg) - 1);
647   // Prevent re-entering the crash handler if two crashes occur
648   // concurrently. We preallocate storage that cannot be used
649   // concurrently. We avoid using a pthread mutex here in case
650   // libpthread's data structures are corrupted.
651   if (__sync_lock_test_and_set(&g_handling_exception, 1)) {
652     // Wait forever here so that the first crashing thread can report
653     // the crash and exit.
658   MinidumpAllocator *minidump_writer = g_minidump_writer;
659   WriteMinidump(minidump_writer, context);
// Deliver the in-memory minidump to user code (upload, save, etc.).
662     g_callback_func(minidump_writer->data(), minidump_writer->size());
664   // Flush streams to aid debugging, although since the process might
665   // be in a corrupted state this might crash.
// Public entry point: installs CrashHandler as the NaCl exception
// handler, resolves the nexe's build ID (static-linking case), and
// preallocates all memory the handler will need so no allocation
// happens at crash time.
671 void nacl_minidump_register_crash_handler(void) {
672   errno = nacl_exception_set_handler(CrashHandler);
674     perror("minidump: Failed to register an exception handler");
678 #if !DYNAMIC_LOADING_SUPPORT
680    * With dynamic linking, all modules' build IDs are discovered
681    * via dl_iterate_phdr->PT_NOTE->NT_BUILD_ID. g_module_build_id
682    * is not used at all (see WriteModuleList, above).
// Only look up the built-in build ID if the app hasn't already set one
// via nacl_minidump_set_module_build_id().
684   if (!g_module_build_id_set) {
685     // Try to use the nexe's built-in build ID.
686     const char *data_ptr;
688     if (nacl_get_build_id(&data_ptr, &size)) {
689       // Truncate the ID if necessary. The minidump format uses a 16
690       // byte ID, whereas ELF build IDs are typically 20-byte SHA1
692       memcpy(&g_module_build_id, data_ptr,
693              std::min(size, sizeof(g_module_build_id)));
694       g_module_build_id_set = 1;
699 #if DYNAMIC_LOADING_SUPPORT
// Spare arena used if a crash happens with no module snapshot taken.
700   g_module_snapshot_workspace = new MinidumpAllocator(kLimitModuleListSize);
702   g_minidump_writer = new MinidumpAllocator(kMinidumpBufferSize);
// Registers the function CrashHandler invokes with the finished
// minidump buffer and its size.
705 void nacl_minidump_set_callback(nacl_minidump_callback_t callback) {
706   g_callback_func = callback;
// Overrides the default "main.nexe" module name reported in the dump.
// The string must outlive crash handling; it is not copied here.
709 void nacl_minidump_set_module_name(const char *module_name) {
710   g_module_name = module_name;
714  * Under dynamic linking, this interface is a no-op.
// Explicitly sets the main module's build ID (static-linking case),
// taking precedence over the nexe's embedded build-ID note.
716 void nacl_minidump_set_module_build_id(
717     const uint8_t data[NACL_MINIDUMP_BUILD_ID_SIZE]) {
718 #if !DYNAMIC_LOADING_SUPPORT
719   assert(sizeof(g_module_build_id) == NACL_MINIDUMP_BUILD_ID_SIZE);
720   memcpy(&g_module_build_id, data, NACL_MINIDUMP_BUILD_ID_SIZE);
721   g_module_build_id_set = 1;
// Captures the current module list into a fresh arena and publishes it
// atomically as g_module_snapshot, freeing any previous snapshot.
// Taking a snapshot ahead of time avoids calling dl_iterate_phdr()
// inside the crash handler.
725 void nacl_minidump_snapshot_module_list(void) {
726 #if DYNAMIC_LOADING_SUPPORT
727   MinidumpAllocator *modules_arena = new MinidumpAllocator(
728       kLimitModuleListSize);
729   CaptureModules(modules_arena);
// Swap in the new snapshot; the swap returns the old one for deletion.
730   modules_arena = __sync_lock_test_and_set(&g_module_snapshot, modules_arena);
731   if (modules_arena != NULL)
732     delete modules_arena;
// Discards any published module-list snapshot; detaches it atomically
// before deleting so a concurrent crash handler can't use freed memory.
736 void nacl_minidump_clear_module_list(void) {
737 #if DYNAMIC_LOADING_SUPPORT
738   MinidumpAllocator *modules_arena = __sync_lock_test_and_set(
739       &g_module_snapshot, NULL);
740   if (modules_arena != NULL)
741     delete modules_arena;