2 * Copyright (c) 2013 The Native Client Authors. All rights reserved.
3 * Use of this source code is governed by a BSD-style license that can be
4 * found in the LICENSE file.
10 # define DYNAMIC_LOADING_SUPPORT 1
12 # define DYNAMIC_LOADING_SUPPORT 0
14 #if DYNAMIC_LOADING_SUPPORT
28 #include "breakpad/src/google_breakpad/common/minidump_format.h"
29 #if !DYNAMIC_LOADING_SUPPORT
30 # include "native_client/src/include/elf_constants.h"
32 #include "native_client/src/include/nacl/nacl_exception.h"
33 #include "native_client/src/include/nacl/nacl_minidump.h"
34 #include "native_client/src/untrusted/minidump_generator/build_id.h"
37 extern char __executable_start[]; // Start of code segment
38 extern char __etext[]; // End of code segment
40 #if defined(__GLIBC__)
41 // Variable defined by ld.so, used as a workaround for
42 // https://code.google.com/p/nativeclient/issues/detail?id=3431.
43 extern void *__libc_stack_end;
// Forward declaration: the arena allocator used for all minidump data.
46 class MinidumpAllocator;
48 // Restrict how much of the stack we dump to reduce upload size and to
49 // avoid dynamic allocation.
50 static const size_t kLimitStackDumpSize = 512 * 1024;
// Budget for everything in the minidump that is not the stack snapshot
// (headers, directory, register contexts, module list, strings, ...).
52 static const size_t kLimitNonStackSize = 64 * 1024;
54 // The crash reporter is expected to be used in a situation where the
55 // current process is damaged or out of memory, so it avoids dynamic
56 // memory allocation and allocates a fixed-size buffer of the
57 // following size at startup.
58 static const size_t kMinidumpBufferSize =
59 kLimitStackDumpSize + kLimitNonStackSize;
// Module name reported for the main executable; overridable via
// nacl_minidump_set_module_name().
61 static const char *g_module_name = "main.nexe";
// User callback invoked with the finished minidump (see CrashHandler).
62 static nacl_minidump_callback_t g_callback_func;
// Preallocated writer, created in nacl_minidump_register_crash_handler().
63 static MinidumpAllocator *g_minidump_writer;
// Re-entrancy guard flag, set with __sync_lock_test_and_set in CrashHandler.
64 static int g_handling_exception = 0;
66 #if !DYNAMIC_LOADING_SUPPORT
// Static-linking only: single build ID for the main nexe, either read from
// the ELF notes or supplied via nacl_minidump_set_module_build_id().
67 static MDGUID g_module_build_id;
68 static int g_module_build_id_set;
// Bump allocator over a single mmap'd region. All minidump chunks are
// carved out of this buffer so that no heap allocation happens while the
// process is crashing. NOTE(review): several member lines are missing from
// this excerpt (e.g. the line that sets buf_size_ after a successful mmap,
// and the early-return paths) — confirm against the full file.
71 class MinidumpAllocator {
// Maps |size| bytes of anonymous memory up front; on failure the buffer
// stays NULL and later allocations will fail gracefully.
77 explicit MinidumpAllocator(uint32_t size) :
78 buf_(NULL), buf_size_(0), offset_(0) {
79 void *mapping = mmap(NULL, size, PROT_READ | PROT_WRITE,
80 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
81 if (mapping == MAP_FAILED) {
82 perror("minidump: Failed to allocate memory");
85 buf_ = (char *) mapping;
89 ~MinidumpAllocator() {
91 int result = munmap(buf_, buf_size_);
93 // Suppress unused variable warning in case where assert is compiled out.
// Reserves |size| bytes; returns the writable pointer in *ptr and the
// byte offset from the start of the dump in *position. Space is zeroed.
98 bool AllocateSpace(size_t size, char **ptr, uint32_t *position) {
99 if (offset_ + size >= buf_size_)
102 *ptr = buf_ + offset_;
104 memset(*ptr, 0, size);
// Convenience wrapper around AllocateSpace() that discards the position;
// returns NULL when the arena is exhausted.
108 void *Alloc(size_t size) {
111 if (!AllocateSpace(size, &ptr, &position))
// strdup() equivalent that allocates from this arena instead of the heap.
116 char *StrDup(const char *str) {
119 size = strlen(str) + 1;
120 str_copy = reinterpret_cast<char *>(Alloc(size));
121 if (str_copy == NULL)
123 memcpy(str_copy, str, size);
// Accessors for the finished dump: raw bytes and number of bytes used.
127 char *data() { return buf_; }
128 size_t size() { return offset_; }
131 #if DYNAMIC_LOADING_SUPPORT
133 // Limit the number of modules to capture and their name length.
134 static const size_t kLimitModuleListSize = 64 * 1024;
// Singly-linked list node describing one loaded module (see
// CaptureModulesCallback). Nodes live inside a MinidumpAllocator arena.
137 struct ModuleEntry *next;
140 uintptr_t code_segment_start;
141 uintptr_t code_segment_size;
// g_module_snapshot holds a pre-captured module list (swapped in/out
// atomically with __sync_lock_test_and_set); the workspace arena is the
// crash-time fallback when no snapshot was taken.
144 static MinidumpAllocator *g_module_snapshot;
145 static MinidumpAllocator *g_module_snapshot_workspace;
149 // TypedMDRVA represents a minidump object chunk. This interface is
150 // based on the TypedMDRVA class in the Breakpad codebase. Breakpad's
151 // implementation writes directly to a file, whereas this
152 // implementation constructs the minidump file in memory.
153 template<typename MDType>
// Non-owning pointer to the arena all allocations come from.
155 MinidumpAllocator *writer_;
161 explicit TypedMDRVA(MinidumpAllocator *writer) :
167 // Allocates space for MDType.
169 return AllocateArray(1);
172 // Allocates an array of |count| elements of MDType.
173 bool AllocateArray(size_t count) {
174 size_ = sizeof(MDType) * count;
175 return writer_->AllocateSpace(size_, &ptr_, &position_);
178 // Allocates an array of |count| elements of |size| after object of MDType.
179 bool AllocateObjectAndArray(size_t count, size_t length) {
180 size_ = sizeof(MDType) + count * length;
181 return writer_->AllocateSpace(size_, &ptr_, &position_);
184 // Copy |item| to |index|. Must have been allocated using AllocateArray().
// NOTE(review): the offset math skips sizeof(MDType), so this actually
// pairs with AllocateObjectAndArray() — the comment above appears stale.
185 void CopyIndexAfterObject(unsigned int index, void *src, size_t length) {
186 size_t offset = sizeof(MDType) + index * length;
187 assert(offset + length <= size_);
188 memcpy(ptr_ + offset, src, length);
// Typed view of the allocated chunk.
191 MDType *get() { return (MDType *) ptr_; }
// Offset of this chunk from the start of the minidump (an RVA).
193 uint32_t position() { return position_; }
// {size, rva} descriptor used to link chunks together in the dump.
195 MDLocationDescriptor location() {
196 MDLocationDescriptor location = { size_, position_ };
// Translates the NaCl exception-time register state in |context| into a
// Breakpad per-architecture register context, and points
// thread->thread_context at the newly written chunk. On allocation
// failure the relevant case simply bails, leaving thread_context empty.
202 static void ConvertRegisters(MinidumpAllocator *minidump_writer,
203 struct NaClExceptionContext *context,
204 MDRawThread *thread) {
205 NaClExceptionPortableContext *pcontext =
206 nacl_exception_context_get_portable(context);
207 #define COPY_REG(REG) regs.get()->REG = src_regs->REG
208 switch (context->arch) {
// x86-32: registers copied field-by-field from the NaCl register dump.
210 struct NaClUserRegisterStateX8632 *src_regs =
211 (struct NaClUserRegisterStateX8632 *) &context->regs;
212 TypedMDRVA<MDRawContextX86> regs(minidump_writer);
213 if (!regs.Allocate())
215 thread->thread_context = regs.location();
216 // TODO(mseaborn): Report x87/SSE registers too.
217 regs.get()->context_flags =
218 MD_CONTEXT_X86_CONTROL | MD_CONTEXT_X86_INTEGER;
226 regs.get()->eip = src_regs->prog_ctr;
227 regs.get()->eflags = src_regs->flags;
228 regs.get()->esp = src_regs->stack_ptr;
// x86-64: note that stack/frame/program counters come from the portable
// context rather than the raw register state.
232 struct NaClUserRegisterStateX8664 *src_regs =
233 (struct NaClUserRegisterStateX8664 *) &context->regs;
234 TypedMDRVA<MDRawContextAMD64> regs(minidump_writer);
235 if (!regs.Allocate())
237 thread->thread_context = regs.location();
238 // TODO(mseaborn): Report x87/SSE registers too.
239 regs.get()->context_flags =
240 MD_CONTEXT_AMD64_CONTROL | MD_CONTEXT_AMD64_INTEGER;
241 regs.get()->eflags = src_regs->flags;
246 regs.get()->rsp = pcontext->stack_ptr;
247 regs.get()->rbp = pcontext->frame_ptr;
258 regs.get()->rip = pcontext->prog_ctr;
// ARM: r0..r15 are contiguous in the NaCl register dump, so they are
// copied with an indexed loop over a uint32_t view starting at r0.
262 struct NaClUserRegisterStateARM *src_regs =
263 (struct NaClUserRegisterStateARM *) &context->regs;
264 TypedMDRVA<MDRawContextARM> regs(minidump_writer);
265 if (!regs.Allocate())
267 thread->thread_context = regs.location();
268 for (int regnum = 0; regnum < 16; regnum++) {
269 regs.get()->iregs[regnum] = ((uint32_t *) &src_regs->r0)[regnum];
271 regs.get()->cpsr = src_regs->cpsr;
275 // Architecture not recognized. Dump the register state anyway.
276 // Maybe we should do this on all architectures, and Breakpad
277 // should be adapted to read NaCl's portable container format
278 // for register state so that the client code is
279 // architecture-neutral.
280 TypedMDRVA<uint8_t> regs(minidump_writer);
281 if (!regs.AllocateArray(context->size))
283 thread->thread_context = regs.location();
// NOTE(review): this copies from &context — the address of the local
// pointer parameter — not from the context structure itself. It looks
// like it should be memcpy(regs.get(), context, context->size); confirm
// against upstream before relying on the raw dump for unknown arches.
284 memcpy(regs.get(), &context, context->size);
// Copies |size| bytes starting at |start| into the dump and returns a
// descriptor for the copied range. On allocation failure the returned
// descriptor is left zeroed (empty range).
291 static MDMemoryDescriptor SnapshotMemory(MinidumpAllocator *minidump_writer,
292 uintptr_t start, size_t size) {
293 TypedMDRVA<uint8_t> mem_copy(minidump_writer);
294 MDMemoryDescriptor desc = {0};
295 if (mem_copy.AllocateArray(size)) {
296 memcpy(mem_copy.get(), (void *) start, size);
298 desc.start_of_memory_range = start;
299 desc.memory = mem_copy.location();
// Determines the upper bound (highest address) of the current thread's
// stack, used to decide how much stack to snapshot. Returns true on
// success. glibc and newlib need different strategies.
304 static bool GetStackEnd(void **stack_end) {
305 #if defined(__GLIBC__)
309 if (pthread_getattr_np(pthread_self(), &attr) == 0 &&
310 pthread_attr_getstack(&attr, &stack_base, &stack_size) == 0) {
// Stack grows down, so the end is base + size.
311 *stack_end = (void *) ((char *) stack_base + stack_size);
312 pthread_attr_destroy(&attr);
315 // pthread_getattr_np() currently fails on the initial thread. As a
316 // workaround, if we reach here, assume we are on the initial thread
317 // and get the initial thread's stack end as recorded by glibc.
318 // See https://code.google.com/p/nativeclient/issues/detail?id=3431
319 *stack_end = __libc_stack_end;
// Non-glibc (newlib) path: the NaCl pthread library exposes this directly.
322 return pthread_get_stack_end_np(pthread_self(), stack_end) == 0;
// Writes the MD_EXCEPTION_STREAM directory entry. The thread id, fault
// type and fault address are placeholders for now (see TODOs); only the
// register context recorded by WriteThreadList is meaningful.
326 static void WriteExceptionList(MinidumpAllocator *minidump_writer,
327 MDRawDirectory *dirent,
328 MDLocationDescriptor thread_context) {
329 TypedMDRVA<MDRawExceptionStream> exception(minidump_writer);
330 if (!exception.Allocate())
333 // TODO(bradnelson): Specify a particular thread once we gather more than the
335 exception.get()->thread_id = 0;
336 // TODO(bradnelson): Provide information on the type of exception once we
337 // have it. For now report everything as SIGSEGV.
338 exception.get()->exception_record.exception_code =
339 MD_EXCEPTION_CODE_LIN_SIGSEGV;
340 // TODO(bradnelson): Provide the address of the fault, once we have it.
341 exception.get()->exception_record.exception_address = 0;
342 exception.get()->thread_context = thread_context;
344 dirent->stream_type = MD_EXCEPTION_STREAM;
345 dirent->location = exception.location();
// Writes the MD_THREAD_LIST_STREAM entry containing a single MDRawThread
// for the crashing thread: its register context (via ConvertRegisters)
// and a snapshot of its stack, capped at kLimitStackDumpSize. The
// resulting thread_context location is returned through
// |thread_context_out| for reuse by WriteExceptionList.
348 static void WriteThreadList(MinidumpAllocator *minidump_writer,
349 MDRawDirectory *dirent,
350 struct NaClExceptionContext *context,
351 MDLocationDescriptor *thread_context_out) {
352 // This records only the thread that crashed.
353 // TODO(mseaborn): Record other threads too. This will require NaCl
354 // to provide an interface for suspending threads.
// Stream layout: a uint32_t thread count followed by the MDRawThread array.
355 TypedMDRVA<uint32_t> list(minidump_writer);
357 if (!list.AllocateObjectAndArray(num_threads, sizeof(MDRawThread)))
359 *list.get() = num_threads;
361 MDRawThread thread = {0};
362 ConvertRegisters(minidump_writer, context, &thread);
363 *thread_context_out = thread.thread_context;
365 // Record the stack contents.
366 NaClExceptionPortableContext *pcontext =
367 nacl_exception_context_get_portable(context);
368 uintptr_t stack_start = pcontext->stack_ptr;
369 if (context->arch == EM_X86_64) {
370 // Include the x86-64 red zone too to capture local variables.
// Only snapshot if the stack end is known and sane (end >= start).
374 if (GetStackEnd(&stack_end) && stack_start <= (uintptr_t) stack_end) {
375 size_t stack_size = (uintptr_t) stack_end - stack_start;
376 stack_size = std::min(stack_size, kLimitStackDumpSize);
377 thread.stack = SnapshotMemory(minidump_writer, stack_start, stack_size);
380 list.CopyIndexAfterObject(0, &thread, sizeof(thread));
382 dirent->stream_type = MD_THREAD_LIST_STREAM;
383 dirent->location = list.location();
// Maps an ELF e_machine value to the corresponding Breakpad
// MD_CPU_ARCHITECTURE_* constant; unknown architectures map to
// MD_CPU_ARCHITECTURE_UNKNOWN rather than failing.
386 static int MinidumpArchFromElfMachine(int e_machine) {
388 case EM_386: return MD_CPU_ARCHITECTURE_X86;
389 case EM_X86_64: return MD_CPU_ARCHITECTURE_AMD64;
390 case EM_ARM: return MD_CPU_ARCHITECTURE_ARM;
391 case EM_MIPS: return MD_CPU_ARCHITECTURE_MIPS;
392 default: return MD_CPU_ARCHITECTURE_UNKNOWN;
// Writes |string| into the dump as an MDString (uint32_t byte length
// followed by a NUL-terminated UTF-16 buffer) and returns its RVA.
// The widening below is a simple per-byte copy, so it is only correct
// for ASCII input.
396 static uint32_t WriteString(MinidumpAllocator *minidump_writer,
397 const char *string) {
398 int string_length = strlen(string);
399 TypedMDRVA<uint32_t> obj(minidump_writer);
400 if (!obj.AllocateObjectAndArray(string_length + 1, sizeof(uint16_t)))
// Length field is in bytes and excludes the terminating NUL.
402 *obj.get() = string_length * sizeof(uint16_t);
403 for (int i = 0; i < string_length + 1; ++i) {
404 ((MDString *) obj.get())->buffer[i] = string[i];
406 return obj.position();
// Writes the MD_SYSTEM_INFO_STREAM entry: CPU architecture derived from
// the exception context, platform MD_OS_NACL, and a fixed "nacl" CSD
// version string.
409 static void WriteSystemInfo(MinidumpAllocator *minidump_writer,
410 MDRawDirectory *dirent,
411 struct NaClExceptionContext *context) {
412 TypedMDRVA<MDRawSystemInfo> sysinfo(minidump_writer);
413 if (!sysinfo.Allocate())
415 sysinfo.get()->processor_architecture =
416 MinidumpArchFromElfMachine(context->arch);
417 sysinfo.get()->platform_id = MD_OS_NACL;
418 sysinfo.get()->csd_version_rva = WriteString(minidump_writer, "nacl");
419 dirent->stream_type = MD_SYSTEM_INFO_STREAM;
420 dirent->location = sysinfo.location();
// Writes a minimal MD_MISC_INFO_STREAM entry containing only the record
// size, purely so downstream tools accept the dump.
423 static void WriteMiscInfo(MinidumpAllocator *minidump_writer,
424 MDRawDirectory *dirent) {
425 // Write empty record to keep minidump_dump happy.
426 TypedMDRVA<MDRawMiscInfo> info(minidump_writer);
427 if (!info.Allocate())
429 info.get()->size_of_info = sizeof(MDRawMiscInfo);
430 dirent->stream_type = MD_MISC_INFO_STREAM;
431 dirent->location = info.location();
434 #if DYNAMIC_LOADING_SUPPORT
// dl_iterate_phdr() callback: records one loaded module into the arena
// passed via |data|. For each module it captures the name (falling back
// to g_module_name for the unnamed main module), the executable PT_LOAD
// segment's address range, and the build ID from the PT_NOTE segment.
// Modules are prepended to the linked list whose head pointer lives at
// the start of the arena (see CaptureModules).
435 static int CaptureModulesCallback(
436 struct dl_phdr_info *info, size_t size, void *data) {
437 MinidumpAllocator *modules_arena = reinterpret_cast<MinidumpAllocator *>(
439 ModuleEntry **modules = reinterpret_cast<ModuleEntry **>(
440 modules_arena->data());
442 ModuleEntry *module = reinterpret_cast<ModuleEntry *>(
443 modules_arena->Alloc(sizeof(ModuleEntry)));
444 if (module == NULL) {
// An empty dlpi_name means the main executable; substitute the
// configured module name.
448 if (strlen(info->dlpi_name) > 0) {
449 module->name = modules_arena->StrDup(info->dlpi_name);
451 module->name = modules_arena->StrDup(g_module_name);
453 if (module->name == NULL)
456 // Blank these out in case we don't find values for them.
457 module->code_segment_start = 0;
458 module->code_segment_size = 0;
459 memset(&module->build_id, 0, sizeof(module->build_id));
461 bool found_code = false;
462 bool found_build_id = false;
// Scan program headers for the first PT_NOTE build ID and the first
// executable PT_LOAD segment.
464 for (int i = 0; i < info->dlpi_phnum; ++i) {
465 if (!found_build_id && info->dlpi_phdr[i].p_type == PT_NOTE) {
466 const char *data_ptr;
468 const void *addr = reinterpret_cast<const void *>(
469 info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
// NOTE(review): |size| (the callback's second parameter) is reused
// here as the build-ID length out-parameter — intentional but subtle.
470 if (!nacl_get_build_id_from_notes(
471 addr, info->dlpi_phdr[i].p_memsz, &data_ptr, &size)) {
474 // Truncate the ID if necessary. The minidump format uses a 16
475 // byte ID, whereas ELF build IDs are typically 20-byte SHA1
477 memcpy(&module->build_id, data_ptr,
478 std::min(size, sizeof(module->build_id)));
479 found_build_id = true;
480 } else if (!found_code &&
481 info->dlpi_phdr[i].p_type == PT_LOAD &&
482 (info->dlpi_phdr[i].p_flags & PF_X) != 0) {
483 module->code_segment_start = info->dlpi_addr + info->dlpi_phdr[i].p_vaddr;
484 module->code_segment_size = info->dlpi_phdr[i].p_memsz;
489 // The entry for runnable-ld.so doesn't report a PT_LOAD segment.
490 // Don't emit it, as breakpad is confused by zero length modules.
// Prepend to the module list.
492 module->next = *modules;
// Walks all loaded modules with dl_iterate_phdr(), building a linked
// list of ModuleEntry records inside |modules_arena|. The list head
// pointer is the first allocation in the arena so that consumers can
// recover it from modules_arena->data().
499 static void CaptureModules(MinidumpAllocator *modules_arena) {
500 // Allocate space for the pointer to the head of the module list
501 // so that it will be at modules_arena.data().
502 ModuleEntry **head = reinterpret_cast<ModuleEntry **>(
503 modules_arena->Alloc(sizeof(ModuleEntry *)));
506 dl_iterate_phdr(CaptureModulesCallback, modules_arena);
508 // TODO(bradnelson): Convert this to a test once we have the plumbing to
509 // post-process the minidumps in a test.
510 // There should be at least one module.
511 assert(*head != NULL);
// Writes the MD_MODULE_LIST_STREAM entry. With dynamic loading, it uses
// the pre-captured module snapshot if one exists (atomically claimed
// from g_module_snapshot), otherwise captures the module list now into
// the preallocated workspace arena. Without dynamic loading, it emits a
// single module covering __executable_start..__etext with
// g_module_build_id. Each module gets an MDCVInfoPDB70 record carrying
// its build ID and name so Breakpad can match symbols.
515 static void WriteModuleList(MinidumpAllocator *minidump_writer,
516 MDRawDirectory *dirent) {
517 // TODO(mseaborn): Report the IRT's build ID here too, once the IRT
518 // provides an interface for querying it.
519 TypedMDRVA<uint32_t> module_list(minidump_writer);
521 #if DYNAMIC_LOADING_SUPPORT
// Atomically take ownership of the snapshot (if any) so a concurrent
// nacl_minidump_clear_module_list() cannot free it under us.
522 MinidumpAllocator *modules_arena = __sync_lock_test_and_set(
523 &g_module_snapshot, NULL);
524 if (modules_arena == NULL) {
525 modules_arena = g_module_snapshot_workspace;
526 CaptureModules(modules_arena);
528 // NOTE: Consciously leaking modules_arena. We are crashing and about to
529 // shut down anyhow. Attempting to free it can only produce more volatility.
530 ModuleEntry **modules = reinterpret_cast<ModuleEntry **>(
531 modules_arena->data());
// First pass: count the modules so the stream header can be sized.
532 int module_count = 0;
533 for (ModuleEntry *module = *modules; module; module = module->next) {
// Static-linking path: exactly one module (the main nexe).
537 int module_count = 1;
// Stream layout: uint32_t count followed by MD_MODULE_SIZE records.
539 if (!module_list.AllocateObjectAndArray(module_count, MD_MODULE_SIZE)) {
542 *module_list.get() = module_count;
544 #if DYNAMIC_LOADING_SUPPORT
// Second pass: emit one MDRawModule (+ CV record) per captured module.
546 for (ModuleEntry *module = *modules; module; module = module->next) {
547 TypedMDRVA<MDCVInfoPDB70> cv(minidump_writer);
548 size_t name_size = strlen(module->name) + 1;
549 if (!cv.AllocateObjectAndArray(name_size, sizeof(char))) {
552 cv.get()->cv_signature = MD_CVINFOPDB70_SIGNATURE;
553 cv.get()->signature = module->build_id;
554 memcpy(cv.get()->pdb_file_name, module->name, name_size);
556 MDRawModule dst_module = {0};
557 dst_module.base_of_image = module->code_segment_start;
558 dst_module.size_of_image = module->code_segment_size;
559 dst_module.module_name_rva = WriteString(minidump_writer, module->name);
560 dst_module.cv_record = cv.location();
561 module_list.CopyIndexAfterObject(index++, &dst_module, MD_MODULE_SIZE);
// Static-linking path: describe the main nexe's code segment using the
// linker-provided __executable_start/__etext symbols.
564 TypedMDRVA<MDCVInfoPDB70> cv(minidump_writer);
565 size_t name_size = strlen(g_module_name) + 1;
566 if (!cv.AllocateObjectAndArray(name_size, sizeof(char))) {
569 cv.get()->cv_signature = MD_CVINFOPDB70_SIGNATURE;
570 cv.get()->signature = g_module_build_id;
571 memcpy(cv.get()->pdb_file_name, g_module_name, name_size);
573 MDRawModule dst_module = {0};
574 dst_module.base_of_image = (uintptr_t) &__executable_start;
575 dst_module.size_of_image = (uintptr_t) &__etext -
576 (uintptr_t) &__executable_start;
577 dst_module.module_name_rva = WriteString(minidump_writer, g_module_name);
578 dst_module.cv_record = cv.location();
579 module_list.CopyIndexAfterObject(0, &dst_module, MD_MODULE_SIZE);
582 dirent->stream_type = MD_MODULE_LIST_STREAM;
583 dirent->location = module_list.location();
// Writes an empty MD_MEMORY_LIST_STREAM entry (zero descriptors) so the
// stream directory is well-formed for consumers.
586 static void WriteMemoryList(MinidumpAllocator *minidump_writer,
587 MDRawDirectory *dirent) {
588 // TODO(bradnelson): Actually capture memory regions.
589 // Write empty list to keep minidump_dump happy.
590 TypedMDRVA<uint32_t> memory_list(minidump_writer);
591 if (!memory_list.AllocateObjectAndArray(0, sizeof(MDMemoryDescriptor))) {
594 *memory_list.get() = 0;
596 dirent->stream_type = MD_MEMORY_LIST_STREAM;
597 dirent->location = memory_list.location();
// Writes an empty MD_MEMORY_INFO_LIST_STREAM entry: a valid header with
// zero entries, again just to keep dump consumers happy.
600 static void WriteMemoryInfoList(MinidumpAllocator *minidump_writer,
601 MDRawDirectory *dirent) {
602 // TODO(bradnelson): Actually capture memory info regions.
603 // Write empty list to keep minidump_dump happy.
604 TypedMDRVA<MDRawMemoryInfoList> memory_info_list(minidump_writer);
605 if (!memory_info_list.AllocateObjectAndArray(
606 0, sizeof(MDRawMemoryInfo))) {
609 memory_info_list.get()->size_of_header = sizeof(MDRawMemoryInfoList);
610 memory_info_list.get()->size_of_entry = sizeof(MDRawMemoryInfo);
611 memory_info_list.get()->number_of_entries = 0;
613 dirent->stream_type = MD_MEMORY_INFO_LIST_STREAM;
614 dirent->location = memory_info_list.location();
// Top-level dump builder: writes the MDRawHeader, allocates the stream
// directory, then invokes each stream writer in order. kNumWriters must
// match the number of Write* calls below (checked by the assert).
617 static void WriteMinidump(MinidumpAllocator *minidump_writer,
618 struct NaClExceptionContext *context) {
619 const int kNumWriters = 7;
620 TypedMDRVA<MDRawHeader> header(minidump_writer);
621 TypedMDRVA<MDRawDirectory> dir(minidump_writer);
622 if (!header.Allocate())
624 if (!dir.AllocateArray(kNumWriters))
626 header.get()->signature = MD_HEADER_SIGNATURE;
627 header.get()->version = MD_HEADER_VERSION;
628 header.get()->time_date_stamp = time(NULL);
629 header.get()->stream_count = kNumWriters;
630 header.get()->stream_directory_rva = dir.position();
// Thread list runs first so its register-context location can be shared
// with the exception stream.
633 MDLocationDescriptor thread_context = {0};
634 WriteThreadList(minidump_writer, &dir.get()[dir_index++], context,
636 WriteExceptionList(minidump_writer, &dir.get()[dir_index++], thread_context);
637 WriteSystemInfo(minidump_writer, &dir.get()[dir_index++], context);
638 WriteMiscInfo(minidump_writer, &dir.get()[dir_index++]);
639 WriteModuleList(minidump_writer, &dir.get()[dir_index++]);
640 WriteMemoryList(minidump_writer, &dir.get()[dir_index++]);
641 WriteMemoryInfoList(minidump_writer, &dir.get()[dir_index++]);
642 assert(dir_index == kNumWriters);
// NaCl exception handler entry point. Announces the crash on stderr
// (via raw write(), which is async-signal-safe unlike stdio), builds the
// minidump into the preallocated buffer, and hands the bytes to the
// registered callback.
645 static void CrashHandler(struct NaClExceptionContext *context) {
646 static const char msg[] = "minidump: Caught crash\n";
647 write(2, msg, sizeof(msg) - 1);
649 // Prevent re-entering the crash handler if two crashes occur
650 // concurrently. We preallocate storage that cannot be used
651 // concurrently. We avoid using a pthread mutex here in case
652 // libpthread's data structures are corrupted.
653 if (__sync_lock_test_and_set(&g_handling_exception, 1)) {
654 // Wait forever here so that the first crashing thread can report
655 // the crash and exit.
660 MinidumpAllocator *minidump_writer = g_minidump_writer;
661 WriteMinidump(minidump_writer, context);
// NOTE(review): g_callback_func is invoked unconditionally here; lines
// are missing from this excerpt, so confirm a NULL-callback guard exists
// in the full file before relying on it.
664 g_callback_func(minidump_writer->data(), minidump_writer->size());
666 // Flush streams to aid debugging, although since the process might
667 // be in a corrupted state this might crash.
// Public API: installs CrashHandler as the NaCl exception handler and
// preallocates everything the handler will need at crash time (the
// minidump buffer and, with dynamic loading, the module-list workspace).
// Also resolves the main module's build ID up front in the static case.
673 void nacl_minidump_register_crash_handler(void) {
674 errno = nacl_exception_set_handler(CrashHandler);
676 perror("minidump: Failed to register an exception handler");
680 #if !DYNAMIC_LOADING_SUPPORT
682 * With dynamic linking, all modules' build IDs are discovered
683 * via dl_iterate_phdr->PT_NOTE->NT_BUILD_ID. g_module_build_id
684 * is not used at all (see WriteModuleList, above).
686 if (!g_module_build_id_set) {
687 // Try to use the nexe's built-in build ID.
688 const char *data_ptr;
690 if (nacl_get_build_id(&data_ptr, &size)) {
691 // Truncate the ID if necessary. The minidump format uses a 16
692 // byte ID, whereas ELF build IDs are typically 20-byte SHA1
694 memcpy(&g_module_build_id, data_ptr,
695 std::min(size, sizeof(g_module_build_id)));
696 g_module_build_id_set = 1;
701 #if DYNAMIC_LOADING_SUPPORT
702 g_module_snapshot_workspace = new MinidumpAllocator(kLimitModuleListSize);
704 g_minidump_writer = new MinidumpAllocator(kMinidumpBufferSize);
// Public API: registers the function that receives the finished
// minidump bytes when a crash is handled.
707 void nacl_minidump_set_callback(nacl_minidump_callback_t callback) {
708 g_callback_func = callback;
// Public API: overrides the reported main-module name. The string is
// not copied, so the caller must keep it alive (typically a literal).
711 void nacl_minidump_set_module_name(const char *module_name) {
712 g_module_name = module_name;
716 * Under dynamic linking, this interface is a no-op.
// Public API: explicitly sets the main module's build ID (static
// linking only; with dynamic loading build IDs come from ELF notes).
718 void nacl_minidump_set_module_build_id(
719 const uint8_t data[NACL_MINIDUMP_BUILD_ID_SIZE]) {
720 #if !DYNAMIC_LOADING_SUPPORT
721 assert(sizeof(g_module_build_id) == NACL_MINIDUMP_BUILD_ID_SIZE);
722 memcpy(&g_module_build_id, data, NACL_MINIDUMP_BUILD_ID_SIZE);
723 g_module_build_id_set = 1;
// Public API: captures the current module list into a fresh arena and
// atomically publishes it as g_module_snapshot, so a later crash can
// report modules without walking the loader at crash time. Any previous
// snapshot is freed after being swapped out.
727 void nacl_minidump_snapshot_module_list(void) {
728 #if DYNAMIC_LOADING_SUPPORT
729 MinidumpAllocator *modules_arena = new MinidumpAllocator(
730 kLimitModuleListSize);
731 CaptureModules(modules_arena);
// Swap in the new snapshot; modules_arena now holds the old one (if any).
732 modules_arena = __sync_lock_test_and_set(&g_module_snapshot, modules_arena);
733 if (modules_arena != NULL)
734 delete modules_arena;
// Public API: atomically removes and frees any published module
// snapshot, reverting WriteModuleList to crash-time capture.
738 void nacl_minidump_clear_module_list(void) {
739 #if DYNAMIC_LOADING_SUPPORT
740 MinidumpAllocator *modules_arena = __sync_lock_test_and_set(
741 &g_module_snapshot, NULL);
742 if (modules_arena != NULL)
743 delete modules_arena;