// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/process/memory.h"

#include <CoreFoundation/CoreFoundation.h>
#include <errno.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <malloc/malloc.h>
#import <objc/runtime.h>

#include <new>

#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/mac/mac_util.h"
#include "base/scoped_clear_errno.h"
#include "third_party/apple_apsl/CFBase.h"
#include "third_party/apple_apsl/malloc.h"

#if ARCH_CPU_32_BITS
#include <dlfcn.h>
#include <mach-o/nlist.h>

#include "base/threading/thread_local.h"
#include "third_party/mach_override/mach_override.h"
#endif  // ARCH_CPU_32_BITS

namespace base {

// These are helpers for EnableTerminationOnHeapCorruption, which is a no-op
// on 64 bit Macs.
#if ARCH_CPU_32_BITS
namespace {

// Finds the library path for malloc() and thus the libC part of libSystem,
// which in Lion is in a separate image.
const char* LookUpLibCPath() {
  const void* addr = reinterpret_cast<void*>(&malloc);

  Dl_info info;
  if (dladdr(addr, &info))
    return info.dli_fname;

  DLOG(WARNING) << "Could not find image path for malloc()";
  return NULL;
}

typedef void(*malloc_error_break_t)(void);
malloc_error_break_t g_original_malloc_error_break = NULL;

// Returns the function pointer for malloc_error_break. This symbol is declared
// as __private_extern__ and cannot be dlsym()ed. Instead, use nlist() to
// get it.
malloc_error_break_t LookUpMallocErrorBreak() {
  const char* lib_c_path = LookUpLibCPath();
  if (!lib_c_path)
    return NULL;

  // Only need to look up two symbols, but nlist() requires a NULL-terminated
  // array and takes no count.
  struct nlist nl[3];
  bzero(&nl, sizeof(nl));

  // The symbol to find.
  nl[0].n_un.n_name = const_cast<char*>("_malloc_error_break");

  // A reference symbol by which the address of the desired symbol will be
  // calculated.
  nl[1].n_un.n_name = const_cast<char*>("_malloc");

  int rv = nlist(lib_c_path, nl);
  if (rv != 0 || nl[0].n_type == N_UNDF || nl[1].n_type == N_UNDF) {
    return NULL;
  }

  // nlist() returns addresses as offsets in the image, not the instruction
  // pointer in memory. Use the known in-memory address of malloc()
  // to compute the offset for malloc_error_break().
  uintptr_t reference_addr = reinterpret_cast<uintptr_t>(&malloc);
  reference_addr -= nl[1].n_value;
  reference_addr += nl[0].n_value;

  return reinterpret_cast<malloc_error_break_t>(reference_addr);
}
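
// Worked example of the arithmetic above, with made-up numbers: if nlist()
// reports _malloc at image offset 0x1000 and _malloc_error_break at offset
// 0x1230, and &malloc is 0x9a001000 at runtime, then the image base is
// 0x9a001000 - 0x1000 = 0x9a000000, so malloc_error_break lives at
// 0x9a000000 + 0x1230 = 0x9a001230.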

// Combines ThreadLocalBoolean with AutoReset. It would be convenient
// to compose ThreadLocalPointer<bool> with base::AutoReset<bool>, but that
// would require allocating some storage for the bool.
class ThreadLocalBooleanAutoReset {
 public:
  ThreadLocalBooleanAutoReset(ThreadLocalBoolean* tlb, bool new_value)
      : scoped_tlb_(tlb),
        original_value_(tlb->Get()) {
    scoped_tlb_->Set(new_value);
  }
  ~ThreadLocalBooleanAutoReset() {
    scoped_tlb_->Set(original_value_);
  }

 private:
  ThreadLocalBoolean* scoped_tlb_;
  bool original_value_;

  DISALLOW_COPY_AND_ASSIGN(ThreadLocalBooleanAutoReset);
};
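
// Usage sketch (this is exactly how UncheckedMalloc() below uses it): the
// flag is set for the lifetime of the scope and restored automatically on
// exit, e.g.
//   ThreadLocalBooleanAutoReset flag(g_unchecked_alloc.Pointer(), true);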

base::LazyInstance<ThreadLocalBoolean>::Leaky
    g_unchecked_alloc = LAZY_INSTANCE_INITIALIZER;
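
// Note: the Leaky variant never runs the ThreadLocalBoolean destructor at
// process exit. That avoids destruction-order problems, since the malloc
// hooks that consult this flag can run arbitrarily late in shutdown.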

// NOTE(shess): This is called when the malloc library noticed that the heap
// is fubar. Avoid calls which will re-enter the malloc library.
void CrMallocErrorBreak() {
  g_original_malloc_error_break();

  // Out of memory is certainly not heap corruption, and not necessarily
  // something for which the process should be terminated. Leave that decision
  // to the OOM killer. The EBADF case comes up because the malloc library
  // attempts to log to ASL (syslog) before calling this code, which fails
  // accessing a Unix-domain socket because of sandboxing.
  if (errno == ENOMEM || (errno == EBADF && g_unchecked_alloc.Get().Get()))
    return;

  // A unit test checks this error message, so it needs to be in release
  // builds.
  char buf[1024] =
      "Terminating process due to a potential for future heap corruption: "
      "errno=";
  char errnobuf[] = {
    '0' + ((errno / 100) % 10),
    '0' + ((errno / 10) % 10),
    '0' + (errno % 10),
    '\000'
  };
  COMPILE_ASSERT(ELAST <= 999, errno_too_large_to_encode);
  strlcat(buf, errnobuf, sizeof(buf));
  RAW_LOG(ERROR, buf);

  // Crash by writing to NULL+errno to allow analyzing errno from
  // crash dump info (setting a breakpad key would re-enter the malloc
  // library). Max documented errno in intro(2) is actually 102, but
  // it really just needs to be "small" to stay on the right vm page.
  const int kMaxErrno = 256;
  char* volatile death_ptr = NULL;
  death_ptr += std::min(errno, kMaxErrno);
  *death_ptr = '!';
}

}  // namespace
#endif  // ARCH_CPU_32_BITS

void EnableTerminationOnHeapCorruption() {
#if defined(ADDRESS_SANITIZER) || ARCH_CPU_64_BITS
  // AddressSanitizer handles heap corruption, and on 64 bit Macs, the malloc
  // system automatically abort()s on heap corruption.
  return;
#else
  // Only override once, otherwise CrMallocErrorBreak() will recurse
  // to itself.
  if (g_original_malloc_error_break)
    return;

  malloc_error_break_t malloc_error_break = LookUpMallocErrorBreak();
  if (!malloc_error_break) {
    DLOG(WARNING) << "Could not find malloc_error_break";
    return;
  }

  mach_error_t err = mach_override_ptr(
      (void*)malloc_error_break,
      (void*)&CrMallocErrorBreak,
      (void**)&g_original_malloc_error_break);

  if (err != err_none)
    DLOG(WARNING) << "Could not override malloc_error_break; error = " << err;
#endif  // defined(ADDRESS_SANITIZER) || ARCH_CPU_64_BITS
}
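
// A note on the technique, per mach_override's documented behavior:
// mach_override_ptr() overwrites the first instructions of the target with a
// jump to the replacement, and returns (via its third argument) a "reentry
// island" that executes the displaced instructions. That island is what lets
// CrMallocErrorBreak() still call g_original_malloc_error_break().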

// ------------------------------------------------------------------------

namespace {

bool g_oom_killer_enabled;

// Starting with Mac OS X 10.7, the zone allocators set up by the system are
// read-only, to prevent them from being overwritten in an attack. However,
// blindly unprotecting and reprotecting the zone allocators fails with
// GuardMalloc because GuardMalloc sets up its zone allocator using a block of
// memory in its bss. Explicit saving/restoring of the protection is required.
//
// This function takes a pointer to a malloc zone, de-protects it if necessary,
// and returns (in the out parameters) a region of memory (if any) to be
// re-protected when modifications are complete. This approach assumes that
// there is no contention for the protection of this memory.
void DeprotectMallocZone(ChromeMallocZone* default_zone,
                         mach_vm_address_t* reprotection_start,
                         mach_vm_size_t* reprotection_length,
                         vm_prot_t* reprotection_value) {
  mach_port_t unused;
  *reprotection_start = reinterpret_cast<mach_vm_address_t>(default_zone);
  struct vm_region_basic_info_64 info;
  mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
  kern_return_t result =
      mach_vm_region(mach_task_self(),
                     reprotection_start,
                     reprotection_length,
                     VM_REGION_BASIC_INFO_64,
                     reinterpret_cast<vm_region_info_t>(&info),
                     &count,
                     &unused);
  CHECK(result == KERN_SUCCESS);

  result = mach_port_deallocate(mach_task_self(), unused);
  CHECK(result == KERN_SUCCESS);

  // Does the region fully enclose the zone pointers? Possibly unwarranted
  // simplification used: using the size of a full version 8 malloc zone rather
  // than the actual smaller size if the passed-in zone is not version 8.
  CHECK(*reprotection_start <=
            reinterpret_cast<mach_vm_address_t>(default_zone));
  mach_vm_size_t zone_offset = reinterpret_cast<mach_vm_size_t>(default_zone) -
      reinterpret_cast<mach_vm_size_t>(*reprotection_start);
  CHECK(zone_offset + sizeof(ChromeMallocZone) <= *reprotection_length);

  if (info.protection & VM_PROT_WRITE) {
    // No change needed; the zone is already writable.
    *reprotection_start = 0;
    *reprotection_length = 0;
    *reprotection_value = VM_PROT_NONE;
  } else {
    *reprotection_value = info.protection;
    result = mach_vm_protect(mach_task_self(),
                             *reprotection_start,
                             *reprotection_length,
                             false,
                             info.protection | VM_PROT_WRITE);
    CHECK(result == KERN_SUCCESS);
  }
}
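
// Expected calling pattern (this is how EnableTerminationOnOutOfMemory()
// below uses it): deprotect, mutate the zone's function pointers, then
// restore the saved protection:
//   DeprotectMallocZone(zone, &start, &length, &prot);
//   ... swap the zone's function pointers ...
//   if (start)
//     mach_vm_protect(mach_task_self(), start, length, false, prot);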

// === C malloc/calloc/valloc/realloc/posix_memalign ===

typedef void* (*malloc_type)(struct _malloc_zone_t* zone,
                             size_t size);
typedef void* (*calloc_type)(struct _malloc_zone_t* zone,
                             size_t num_items,
                             size_t size);
typedef void* (*valloc_type)(struct _malloc_zone_t* zone,
                             size_t size);
typedef void (*free_type)(struct _malloc_zone_t* zone,
                          void* ptr);
typedef void* (*realloc_type)(struct _malloc_zone_t* zone,
                              void* ptr,
                              size_t size);
typedef void* (*memalign_type)(struct _malloc_zone_t* zone,
                               size_t alignment,
                               size_t size);

malloc_type g_old_malloc;
calloc_type g_old_calloc;
valloc_type g_old_valloc;
free_type g_old_free;
realloc_type g_old_realloc;
memalign_type g_old_memalign;

malloc_type g_old_malloc_purgeable;
calloc_type g_old_calloc_purgeable;
valloc_type g_old_valloc_purgeable;
free_type g_old_free_purgeable;
realloc_type g_old_realloc_purgeable;
memalign_type g_old_memalign_purgeable;
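
// These typedefs mirror the corresponding function-pointer fields of
// malloc_zone_t (see third_party/apple_apsl/malloc.h), which is what allows
// the oom_killer_* hooks below to be swapped directly into a zone.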

void* oom_killer_malloc(struct _malloc_zone_t* zone,
                        size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_malloc(zone, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_calloc(struct _malloc_zone_t* zone,
                        size_t num_items,
                        size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_calloc(zone, num_items, size);
  if (!result && num_items && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_valloc(struct _malloc_zone_t* zone,
                        size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_valloc(zone, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void oom_killer_free(struct _malloc_zone_t* zone,
                     void* ptr) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  g_old_free(zone, ptr);
}

void* oom_killer_realloc(struct _malloc_zone_t* zone,
                         void* ptr,
                         size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_realloc(zone, ptr, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_memalign(struct _malloc_zone_t* zone,
                          size_t alignment,
                          size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_memalign(zone, alignment, size);
  // Only die if posix_memalign would have returned ENOMEM, since there are
  // other reasons why NULL might be returned (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ).
  if (!result && size && alignment >= sizeof(void*)
      && (alignment & (alignment - 1)) == 0) {
    debug::BreakDebugger();
  }
  return result;
}
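
// Note: (alignment & (alignment - 1)) == 0 is the standard power-of-two test.
// Combined with alignment >= sizeof(void*), the condition above matches the
// EINVAL preconditions of posix_memalign(3), so a NULL return that still
// passes it should indicate genuine memory exhaustion. The same reasoning
// applies to the purgeable variant below.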

void* oom_killer_malloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_malloc_purgeable(zone, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_calloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t num_items,
                                  size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_calloc_purgeable(zone, num_items, size);
  if (!result && num_items && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_valloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_valloc_purgeable(zone, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void oom_killer_free_purgeable(struct _malloc_zone_t* zone,
                               void* ptr) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  g_old_free_purgeable(zone, ptr);
}

void* oom_killer_realloc_purgeable(struct _malloc_zone_t* zone,
                                   void* ptr,
                                   size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_realloc_purgeable(zone, ptr, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
                                    size_t alignment,
                                    size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_memalign_purgeable(zone, alignment, size);
  // Only die if posix_memalign would have returned ENOMEM, since there are
  // other reasons why NULL might be returned (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ).
  if (!result && size && alignment >= sizeof(void*)
      && (alignment & (alignment - 1)) == 0) {
    debug::BreakDebugger();
  }
  return result;
}

// === C++ operator new ===

void oom_killer_new() {
  debug::BreakDebugger();
}

// === Core Foundation CFAllocators ===

bool CanGetContextForCFAllocator() {
  return !base::mac::IsOSLaterThanMavericks_DontCallThis();
}

CFAllocatorContext* ContextForCFAllocator(CFAllocatorRef allocator) {
  if (base::mac::IsOSSnowLeopard()) {
    ChromeCFAllocatorLeopards* our_allocator =
        const_cast<ChromeCFAllocatorLeopards*>(
            reinterpret_cast<const ChromeCFAllocatorLeopards*>(allocator));
    return &our_allocator->_context;
  } else if (base::mac::IsOSLion() ||
             base::mac::IsOSMountainLion() ||
             base::mac::IsOSMavericks()) {
    ChromeCFAllocatorLions* our_allocator =
        const_cast<ChromeCFAllocatorLions*>(
            reinterpret_cast<const ChromeCFAllocatorLions*>(allocator));
    return &our_allocator->_context;
  } else {
    return NULL;
  }
}
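
// Reaching into the private struct layouts replicated in
// third_party/apple_apsl/CFBase.h is necessary because the public
// CFAllocatorGetContext() copies the context out rather than exposing a
// mutable pointer, so the allocate callback cannot be swapped through it.
// The private layout differs across OS generations, hence the version checks
// above.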

CFAllocatorAllocateCallBack g_old_cfallocator_system_default;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc_zone;

void* oom_killer_cfallocator_system_default(CFIndex alloc_size,
                                            CFOptionFlags hint,
                                            void* info) {
  void* result = g_old_cfallocator_system_default(alloc_size, hint, info);
  if (!result)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_cfallocator_malloc(CFIndex alloc_size,
                                    CFOptionFlags hint,
                                    void* info) {
  void* result = g_old_cfallocator_malloc(alloc_size, hint, info);
  if (!result)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_cfallocator_malloc_zone(CFIndex alloc_size,
                                         CFOptionFlags hint,
                                         void* info) {
  void* result = g_old_cfallocator_malloc_zone(alloc_size, hint, info);
  if (!result)
    debug::BreakDebugger();
  return result;
}

// === Cocoa NSObject allocation ===

typedef id (*allocWithZone_t)(id, SEL, NSZone*);
allocWithZone_t g_old_allocWithZone;

id oom_killer_allocWithZone(id self, SEL _cmd, NSZone* zone)
{
  id result = g_old_allocWithZone(self, _cmd, zone);
  if (!result)
    debug::BreakDebugger();
  return result;
}

}  // namespace

void* UncheckedMalloc(size_t size) {
  if (g_old_malloc) {
#if ARCH_CPU_32_BITS
    ScopedClearErrno clear_errno;
    ThreadLocalBooleanAutoReset flag(g_unchecked_alloc.Pointer(), true);
#endif  // ARCH_CPU_32_BITS
    return g_old_malloc(malloc_default_zone(), size);
  }
  return malloc(size);
}

void* UncheckedCalloc(size_t num_items, size_t size) {
  if (g_old_calloc) {
#if ARCH_CPU_32_BITS
    ScopedClearErrno clear_errno;
    ThreadLocalBooleanAutoReset flag(g_unchecked_alloc.Pointer(), true);
#endif  // ARCH_CPU_32_BITS
    return g_old_calloc(malloc_default_zone(), num_items, size);
  }
  return calloc(num_items, size);
}
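
// Illustrative caller pattern (not part of this file): once the OOM killer
// is installed, a failed plain malloc() terminates the process, so code that
// can recover from allocation failure must use these and check for NULL:
//   void* buf = base::UncheckedMalloc(length);
//   if (!buf)
//     return false;  // Recover gracefully instead of crashing.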

void EnableTerminationOnOutOfMemory() {
  if (g_oom_killer_enabled)
    return;

  g_oom_killer_enabled = true;

  // === C malloc/calloc/valloc/realloc/posix_memalign ===

  // This approach is not perfect, as requests for amounts of memory larger
  // than MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE))
  // will still fail with a NULL rather than dying (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c for
  // details). Unfortunately, it's the best we can do. Also note that this
  // does not affect allocations from non-default zones.

  CHECK(!g_old_malloc && !g_old_calloc && !g_old_valloc && !g_old_realloc &&
        !g_old_memalign) << "Old allocators unexpectedly non-null";

  CHECK(!g_old_malloc_purgeable && !g_old_calloc_purgeable &&
        !g_old_valloc_purgeable && !g_old_realloc_purgeable &&
        !g_old_memalign_purgeable) << "Old allocators unexpectedly non-null";

#if !defined(ADDRESS_SANITIZER)
  // Don't do anything special on OOM for the malloc zones replaced by
  // AddressSanitizer, as modifying or protecting them may not work correctly.

  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  ChromeMallocZone* purgeable_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_purgeable_zone());

  mach_vm_address_t default_reprotection_start = 0;
  mach_vm_size_t default_reprotection_length = 0;
  vm_prot_t default_reprotection_value = VM_PROT_NONE;
  DeprotectMallocZone(default_zone,
                      &default_reprotection_start,
                      &default_reprotection_length,
                      &default_reprotection_value);

  mach_vm_address_t purgeable_reprotection_start = 0;
  mach_vm_size_t purgeable_reprotection_length = 0;
  vm_prot_t purgeable_reprotection_value = VM_PROT_NONE;
  if (purgeable_zone) {
    DeprotectMallocZone(purgeable_zone,
                        &purgeable_reprotection_start,
                        &purgeable_reprotection_length,
                        &purgeable_reprotection_value);
  }

  // Default zone

  g_old_malloc = default_zone->malloc;
  g_old_calloc = default_zone->calloc;
  g_old_valloc = default_zone->valloc;
  g_old_free = default_zone->free;
  g_old_realloc = default_zone->realloc;
  CHECK(g_old_malloc && g_old_calloc && g_old_valloc && g_old_free &&
        g_old_realloc)
      << "Failed to get system allocation functions.";

  default_zone->malloc = oom_killer_malloc;
  default_zone->calloc = oom_killer_calloc;
  default_zone->valloc = oom_killer_valloc;
  default_zone->free = oom_killer_free;
  default_zone->realloc = oom_killer_realloc;

  if (default_zone->version >= 5) {
    g_old_memalign = default_zone->memalign;
    if (g_old_memalign)
      default_zone->memalign = oom_killer_memalign;
  }

  // Purgeable zone (if it exists)

  if (purgeable_zone) {
    g_old_malloc_purgeable = purgeable_zone->malloc;
    g_old_calloc_purgeable = purgeable_zone->calloc;
    g_old_valloc_purgeable = purgeable_zone->valloc;
    g_old_free_purgeable = purgeable_zone->free;
    g_old_realloc_purgeable = purgeable_zone->realloc;
    CHECK(g_old_malloc_purgeable && g_old_calloc_purgeable &&
          g_old_valloc_purgeable && g_old_free_purgeable &&
          g_old_realloc_purgeable)
        << "Failed to get system allocation functions.";

    purgeable_zone->malloc = oom_killer_malloc_purgeable;
    purgeable_zone->calloc = oom_killer_calloc_purgeable;
    purgeable_zone->valloc = oom_killer_valloc_purgeable;
    purgeable_zone->free = oom_killer_free_purgeable;
    purgeable_zone->realloc = oom_killer_realloc_purgeable;

    if (purgeable_zone->version >= 5) {
      g_old_memalign_purgeable = purgeable_zone->memalign;
      if (g_old_memalign_purgeable)
        purgeable_zone->memalign = oom_killer_memalign_purgeable;
    }
  }

  // Restore protection if it was active.

  if (default_reprotection_start) {
    kern_return_t result = mach_vm_protect(mach_task_self(),
                                           default_reprotection_start,
                                           default_reprotection_length,
                                           false,
                                           default_reprotection_value);
    CHECK(result == KERN_SUCCESS);
  }

  if (purgeable_reprotection_start) {
    kern_return_t result = mach_vm_protect(mach_task_self(),
                                           purgeable_reprotection_start,
                                           purgeable_reprotection_length,
                                           false,
                                           purgeable_reprotection_value);
    CHECK(result == KERN_SUCCESS);
  }
#endif  // !defined(ADDRESS_SANITIZER)

  // === C malloc_zone_batch_malloc ===

  // batch_malloc is omitted because the default malloc zone's implementation
  // only supports batch_malloc for "tiny" allocations from the free list. It
  // will fail for allocations larger than "tiny", and will only allocate as
  // many blocks as it's able to from the free list. These factors mean that
  // it can return less than the requested memory even in a non-out-of-memory
  // situation. There's no good way to detect whether a batch_malloc failure
  // is due to these other factors, or due to genuine memory or address space
  // exhaustion. The fact that it only allocates space from the "tiny" free
  // list means that it's likely that a failure will not be due to memory
  // exhaustion. Similarly, these constraints on batch_malloc mean that
  // callers must always be expecting to receive less memory than was
  // requested, even in situations where memory pressure is not a concern.
  // Finally, the only public interface to batch_malloc is
  // malloc_zone_batch_malloc, which is specific to the system's malloc
  // implementation. It's unlikely that anyone's even heard of it.

  // === C++ operator new ===

  // Yes, operator new does call through to malloc, but this will catch
  // failures that our imperfect handling of malloc cannot.

  std::set_new_handler(oom_killer_new);
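
  // std::set_new_handler() installs a global handler that operator new
  // invokes when allocation fails, before any std::bad_alloc is thrown, so
  // oom_killer_new() gets to crash the process first.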

#if !defined(ADDRESS_SANITIZER)
  // === Core Foundation CFAllocators ===

  // This will not catch allocation done by custom allocators, but will catch
  // all allocation done by system-provided ones.

  CHECK(!g_old_cfallocator_system_default && !g_old_cfallocator_malloc &&
        !g_old_cfallocator_malloc_zone)
      << "Old allocators unexpectedly non-null";

  bool cf_allocator_internals_known = CanGetContextForCFAllocator();

  if (cf_allocator_internals_known) {
    CFAllocatorContext* context =
        ContextForCFAllocator(kCFAllocatorSystemDefault);
    CHECK(context) << "Failed to get context for kCFAllocatorSystemDefault.";
    g_old_cfallocator_system_default = context->allocate;
    CHECK(g_old_cfallocator_system_default)
        << "Failed to get kCFAllocatorSystemDefault allocation function.";
    context->allocate = oom_killer_cfallocator_system_default;

    context = ContextForCFAllocator(kCFAllocatorMalloc);
    CHECK(context) << "Failed to get context for kCFAllocatorMalloc.";
    g_old_cfallocator_malloc = context->allocate;
    CHECK(g_old_cfallocator_malloc)
        << "Failed to get kCFAllocatorMalloc allocation function.";
    context->allocate = oom_killer_cfallocator_malloc;

    context = ContextForCFAllocator(kCFAllocatorMallocZone);
    CHECK(context) << "Failed to get context for kCFAllocatorMallocZone.";
    g_old_cfallocator_malloc_zone = context->allocate;
    CHECK(g_old_cfallocator_malloc_zone)
        << "Failed to get kCFAllocatorMallocZone allocation function.";
    context->allocate = oom_killer_cfallocator_malloc_zone;
  } else {
    NSLog(@"Internals of CFAllocator not known; out-of-memory failures via "
          "CFAllocator will not result in termination. http://crbug.com/45650");
  }
#endif  // !defined(ADDRESS_SANITIZER)

  // === Cocoa NSObject allocation ===

  // Note that both +[NSObject new] and +[NSObject alloc] call through to
  // +[NSObject allocWithZone:].

  CHECK(!g_old_allocWithZone)
      << "Old allocator unexpectedly non-null";

  Class nsobject_class = [NSObject class];
  Method orig_method = class_getClassMethod(nsobject_class,
                                            @selector(allocWithZone:));
  g_old_allocWithZone = reinterpret_cast<allocWithZone_t>(
      method_getImplementation(orig_method));
  CHECK(g_old_allocWithZone)
      << "Failed to get allocWithZone allocation function.";
  method_setImplementation(orig_method,
                           reinterpret_cast<IMP>(oom_killer_allocWithZone));
}

}  // namespace base