/*
 * Copyright (c) 2012 The Native Client Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

/*
 * NaCl helper functions to deal with elf images
 */
11 #include "native_client/src/include/portability.h"
18 #define NACL_LOG_MODULE_NAME "elf_util"
20 #include "native_client/src/include/elf_constants.h"
21 #include "native_client/src/include/elf.h"
22 #include "native_client/src/include/nacl_macros.h"
23 #include "native_client/src/include/nacl_platform.h"
25 #include "native_client/src/shared/gio/gio.h"
26 #include "native_client/src/shared/platform/nacl_check.h"
27 #include "native_client/src/shared/platform/nacl_host_desc.h"
28 #include "native_client/src/shared/platform/nacl_log.h"
30 #include "native_client/src/trusted/desc/nacl_desc_effector_trusted_mem.h"
31 #include "native_client/src/trusted/fault_injection/fault_injection.h"
32 #include "native_client/src/trusted/perf_counter/nacl_perf_counter.h"
33 #include "native_client/src/trusted/service_runtime/elf_util.h"
34 #include "native_client/src/trusted/service_runtime/include/bits/mman.h"
35 #include "native_client/src/trusted/service_runtime/nacl_config.h"
36 #include "native_client/src/trusted/service_runtime/nacl_text.h"
37 #include "native_client/src/trusted/service_runtime/nacl_valgrind_hooks.h"
38 #include "native_client/src/trusted/service_runtime/sel_memory.h"
39 #include "native_client/src/trusted/service_runtime/sys_memory.h"
40 #include "native_client/src/trusted/validator/validation_metadata.h"
45 Elf_Phdr phdrs[NACL_MAX_PROGRAM_HEADERS];
46 int loadable[NACL_MAX_PROGRAM_HEADERS];
50 enum NaClPhdrCheckAction {
55 PCA_IGNORE /* ignore this segment. */
59 struct NaClPhdrChecks {
61 Elf_Word p_flags; /* rwx */
62 enum NaClPhdrCheckAction action;
63 int required; /* only for text for now */
64 Elf_Addr p_vaddr; /* if non-zero, vaddr must be this */
/*
 * Other than empty segments, these are the only segments that are allowed.
 */
70 static const struct NaClPhdrChecks nacl_phdr_check_data[] = {
72 { PT_PHDR, PF_R, PCA_IGNORE, 0, 0, },
74 { PT_LOAD, PF_R|PF_X, PCA_TEXT_CHECK, 1, NACL_TRAMPOLINE_END, },
76 { PT_LOAD, PF_R, PCA_RODATA, 0, 0, },
78 { PT_LOAD, PF_R|PF_W, PCA_DATA, 0, 0, },
80 { PT_TLS, PF_R, PCA_IGNORE, 0, 0},
81 #if NACL_ARCH(NACL_BUILD_ARCH) == NACL_arm
82 /* arm exception handling unwind info (for c++)*/
83 /* TODO(robertm): for some reason this does NOT end up in ro maybe because
84 * it is relocatable. Try hacking the linker script to move it.
86 { PT_ARM_EXIDX, PF_R, PCA_IGNORE, 0, 0, },
89 * allow optional GNU stack permission marker, but require that the
90 * stack is non-executable.
92 { PT_GNU_STACK, PF_R|PF_W, PCA_NONE, 0, 0, },
93 /* ignored segments */
94 { PT_DYNAMIC, PF_R|PF_W, PCA_IGNORE, 0, 0},
96 * PT_DYNAMIC with PF_R doesn't occur in practice, but leaving it here just
97 * in case it has been used.
99 { PT_DYNAMIC, PF_R, PCA_IGNORE, 0, 0},
100 { PT_INTERP, PF_R, PCA_IGNORE, 0, 0},
101 { PT_NOTE, PF_R, PCA_IGNORE, 0, 0},
102 { PT_GNU_EH_FRAME, PF_R, PCA_IGNORE, 0, 0},
103 { PT_GNU_RELRO, PF_R, PCA_IGNORE, 0, 0},
104 #if NACL_ARCH(NACL_BUILD_ARCH) == NACL_mips
105 { PT_MIPS_REGINFO, PF_R, PCA_IGNORE, 0, 0},
107 { PT_NULL, PF_R, PCA_IGNORE, 0, 0},
111 static void NaClDumpElfHeader(int loglevel, Elf_Ehdr *elf_hdr) {
113 #define DUMP(m,f) do { NaClLog(loglevel, \
115 elf_hdr->m); } while (0)
117 NaClLog(loglevel, "=================================================\n");
118 NaClLog(loglevel, "Elf header\n");
119 NaClLog(loglevel, "==================================================\n");
121 DUMP(e_ident+1, ".3s");
123 DUMP(e_machine, "#x");
124 DUMP(e_version, "#x");
125 DUMP(e_entry, "#"NACL_PRIxElf_Addr);
126 DUMP(e_phoff, "#"NACL_PRIxElf_Off);
127 DUMP(e_shoff, "#"NACL_PRIxElf_Off);
128 DUMP(e_flags, "#"NACL_PRIxElf_Word);
129 DUMP(e_ehsize, "#"NACL_PRIxElf_Half);
130 DUMP(e_phentsize, "#"NACL_PRIxElf_Half);
131 DUMP(e_phnum, "#"NACL_PRIxElf_Half);
132 DUMP(e_shentsize, "#"NACL_PRIxElf_Half);
133 DUMP(e_shnum, "#"NACL_PRIxElf_Half);
134 DUMP(e_shstrndx, "#"NACL_PRIxElf_Half);
136 NaClLog(loglevel, "sizeof(Elf32_Ehdr) = 0x%x\n", (int) sizeof *elf_hdr);
140 static void NaClDumpElfProgramHeader(int loglevel,
142 #define DUMP(mem, f) do { \
143 NaClLog(loglevel, "%s: %" f "\n", #mem, phdr->mem); \
146 DUMP(p_type, NACL_PRIxElf_Word);
147 DUMP(p_offset, NACL_PRIxElf_Off);
148 DUMP(p_vaddr, NACL_PRIxElf_Addr);
149 DUMP(p_paddr, NACL_PRIxElf_Addr);
150 DUMP(p_filesz, NACL_PRIxElf_Xword);
151 DUMP(p_memsz, NACL_PRIxElf_Xword);
152 DUMP(p_flags, NACL_PRIxElf_Word);
153 NaClLog(2, " (%s %s %s)\n",
154 (phdr->p_flags & PF_R) ? "PF_R" : "",
155 (phdr->p_flags & PF_W) ? "PF_W" : "",
156 (phdr->p_flags & PF_X) ? "PF_X" : "");
157 DUMP(p_align, NACL_PRIxElf_Xword);
159 NaClLog(loglevel, "\n");
163 NaClErrorCode NaClElfImageValidateElfHeader(struct NaClElfImage *image) {
164 const Elf_Ehdr *hdr = &image->ehdr;
166 if (memcmp(hdr->e_ident, ELFMAG, SELFMAG)) {
167 NaClLog(LOG_ERROR, "bad elf magic\n");
168 return LOAD_BAD_ELF_MAGIC;
171 if (ELFCLASS32 != hdr->e_ident[EI_CLASS]) {
172 NaClLog(LOG_ERROR, "bad elf class\n");
173 return LOAD_NOT_32_BIT;
176 if (ET_EXEC != hdr->e_type) {
177 NaClLog(LOG_ERROR, "non executable\n");
178 return LOAD_NOT_EXEC;
181 if (NACL_ELF_E_MACHINE != hdr->e_machine) {
182 NaClLog(LOG_ERROR, "bad machine: %"NACL_PRIxElf_Half"\n", hdr->e_machine);
183 return LOAD_BAD_MACHINE;
186 if (EV_CURRENT != hdr->e_version) {
187 NaClLog(LOG_ERROR, "bad elf version: %"NACL_PRIxElf_Word"\n",
189 return LOAD_BAD_ELF_VERS;
/* TODO(robertm): decouple validation from computation of
   static_text_end and max_vaddr */
197 NaClErrorCode NaClElfImageValidateProgramHeaders(
198 struct NaClElfImage *image,
200 struct NaClElfImageInfo *info) {
202 * Scan phdrs and do sanity checks in-line. Verify that the load
203 * address is NACL_TRAMPOLINE_END, that we have a single text
204 * segment. Data and TLS segments are not required, though it is
205 * hard to avoid with standard tools, but in any case there should
206 * be at most one each. Ensure that no segment's vaddr is outside
207 * of the address space. Ensure that PT_GNU_STACK is present, and
210 const Elf_Ehdr *hdr = &image->ehdr;
211 int seen_seg[NACL_ARRAY_SIZE(nacl_phdr_check_data)];
217 memset(info, 0, sizeof(*info));
219 info->max_vaddr = NACL_TRAMPOLINE_END;
222 * nacl_phdr_check_data is small, so O(|check_data| * nap->elf_hdr.e_phum)
225 memset(seen_seg, 0, sizeof seen_seg);
226 for (segnum = 0; segnum < hdr->e_phnum; ++segnum) {
227 php = &image->phdrs[segnum];
228 NaClLog(3, "Looking at segment %d, type 0x%x, p_flags 0x%x\n",
229 segnum, php->p_type, php->p_flags);
230 if (0 == php->p_memsz) {
232 * We will not load this segment.
234 NaClLog(3, "Ignoring empty segment\n");
238 for (j = 0; j < NACL_ARRAY_SIZE(nacl_phdr_check_data); ++j) {
239 if (php->p_type == nacl_phdr_check_data[j].p_type
240 && php->p_flags == nacl_phdr_check_data[j].p_flags)
243 if (j == NACL_ARRAY_SIZE(nacl_phdr_check_data)) {
244 /* segment not in nacl_phdr_check_data */
246 "Segment %d is of unexpected type 0x%x, flag 0x%x\n",
250 return LOAD_BAD_SEGMENT;
253 NaClLog(2, "Matched nacl_phdr_check_data[%"NACL_PRIuS"]\n", j);
255 NaClLog(2, "Segment %d is a type that has been seen\n", segnum);
256 return LOAD_DUP_SEGMENT;
260 if (PCA_IGNORE == nacl_phdr_check_data[j].action) {
261 NaClLog(3, "Ignoring\n");
266 * We will load this segment later. Do the sanity checks.
268 if (0 != nacl_phdr_check_data[j].p_vaddr
269 && (nacl_phdr_check_data[j].p_vaddr != php->p_vaddr)) {
271 ("Segment %d: bad virtual address: 0x%08"
273 " expected 0x%08"NACL_PRIxElf_Addr"\n"),
276 nacl_phdr_check_data[j].p_vaddr);
277 return LOAD_SEGMENT_BAD_LOC;
279 if (php->p_vaddr < NACL_TRAMPOLINE_END) {
281 ("Segment %d: virtual address (0x%08"NACL_PRIxElf_Addr
285 return LOAD_SEGMENT_OUTSIDE_ADDRSPACE;
287 if (php->p_vaddr >= ((uint64_t) 1U << addr_bits) ||
288 ((uint64_t) 1U << addr_bits) - php->p_vaddr < php->p_memsz) {
289 if (php->p_vaddr + php->p_memsz < php->p_vaddr) {
291 "Segment %d: p_memsz caused integer overflow\n",
295 "Segment %d: too large, ends at 0x%08"NACL_PRIxElf_Addr"\n",
297 php->p_vaddr + php->p_memsz);
299 return LOAD_SEGMENT_OUTSIDE_ADDRSPACE;
301 if (php->p_filesz > php->p_memsz) {
303 ("Segment %d: file size 0x%08"NACL_PRIxElf_Xword" larger"
304 " than memory size 0x%08"NACL_PRIxElf_Xword"\n"),
308 return LOAD_SEGMENT_BAD_PARAM;
311 image->loadable[segnum] = 1;
312 /* record our decision that we will load this segment */
315 * NACL_TRAMPOLINE_END <= p_vaddr
316 * <= p_vaddr + p_memsz
317 * < ((uintptr_t) 1U << nap->addr_bits)
319 if (info->max_vaddr < php->p_vaddr + php->p_memsz) {
320 info->max_vaddr = php->p_vaddr + php->p_memsz;
323 switch (nacl_phdr_check_data[j].action) {
327 if (0 == php->p_memsz) {
328 return LOAD_BAD_ELF_TEXT;
330 info->static_text_end = NACL_TRAMPOLINE_END + php->p_filesz;
333 info->rodata_start = php->p_vaddr;
334 info->rodata_end = php->p_vaddr + php->p_memsz;
337 info->data_start = php->p_vaddr;
338 info->data_end = php->p_vaddr + php->p_memsz;
344 for (j = 0; j < NACL_ARRAY_SIZE(nacl_phdr_check_data); ++j) {
345 if (nacl_phdr_check_data[j].required && !seen_seg[j]) {
346 return LOAD_REQUIRED_SEG_MISSING;
355 struct NaClElfImage *NaClElfImageNew(struct NaClDesc *ndp,
356 NaClErrorCode *err_code) {
358 struct NaClElfImage *result;
359 struct NaClElfImage image;
362 #if NACL_TARGET_SUBARCH == 64
368 memset(image.loadable, 0, sizeof image.loadable);
371 * We read the larger size of an ELFCLASS64 header even if it turns out
372 * we're reading an ELFCLASS32 file. No usable ELFCLASS32 binary could
373 * be so small that it's not larger than Elf64_Ehdr anyway.
375 read_ret = (*NACL_VTBL(NaClDesc, ndp)->PRead)(ndp, &ehdr, sizeof ehdr, 0);
376 if (NaClSSizeIsNegErrno(&read_ret) || (size_t) read_ret != sizeof ehdr) {
377 *err_code = LOAD_READ_ERROR;
378 NaClLog(2, "could not load elf headers\n");
382 #if NACL_TARGET_SUBARCH == 64
383 if (ELFCLASS64 == ehdr.ehdr64.e_ident[EI_CLASS]) {
385 * Convert ELFCLASS64 format to ELFCLASS32 format.
386 * The initial four fields are the same in both classes.
388 memcpy(image.ehdr.e_ident, ehdr.ehdr64.e_ident, EI_NIDENT);
389 image.ehdr.e_ident[EI_CLASS] = ELFCLASS32;
390 image.ehdr.e_type = ehdr.ehdr64.e_type;
391 image.ehdr.e_machine = ehdr.ehdr64.e_machine;
392 image.ehdr.e_version = ehdr.ehdr64.e_version;
393 if (ehdr.ehdr64.e_entry > 0xffffffffU ||
394 ehdr.ehdr64.e_phoff > 0xffffffffU ||
395 ehdr.ehdr64.e_shoff > 0xffffffffU) {
396 *err_code = LOAD_EHDR_OVERFLOW;
397 NaClLog(2, "ELFCLASS64 file header fields overflow 32 bits\n");
400 image.ehdr.e_entry = (Elf32_Addr) ehdr.ehdr64.e_entry;
401 image.ehdr.e_phoff = (Elf32_Off) ehdr.ehdr64.e_phoff;
402 image.ehdr.e_shoff = (Elf32_Off) ehdr.ehdr64.e_shoff;
403 image.ehdr.e_flags = ehdr.ehdr64.e_flags;
404 if (ehdr.ehdr64.e_ehsize != sizeof(ehdr.ehdr64)) {
405 *err_code = LOAD_BAD_EHSIZE;
406 NaClLog(2, "ELFCLASS64 file e_ehsize != %d\n", (int) sizeof(ehdr.ehdr64));
409 image.ehdr.e_ehsize = sizeof(image.ehdr);
410 image.ehdr.e_phentsize = sizeof(image.phdrs[0]);
411 image.ehdr.e_phnum = ehdr.ehdr64.e_phnum;
412 image.ehdr.e_shentsize = ehdr.ehdr64.e_shentsize;
413 image.ehdr.e_shnum = ehdr.ehdr64.e_shnum;
414 image.ehdr.e_shstrndx = ehdr.ehdr64.e_shstrndx;
418 image.ehdr = ehdr.ehdr32;
421 NaClDumpElfHeader(2, &image.ehdr);
423 *err_code = NaClElfImageValidateElfHeader(&image);
424 if (LOAD_OK != *err_code) {
428 /* read program headers */
429 if (image.ehdr.e_phnum > NACL_MAX_PROGRAM_HEADERS) {
430 *err_code = LOAD_TOO_MANY_PROG_HDRS;
431 NaClLog(2, "too many prog headers\n");
435 #if NACL_TARGET_SUBARCH == 64
436 if (ELFCLASS64 == ehdr.ehdr64.e_ident[EI_CLASS]) {
438 * We'll load the 64-bit phdrs and convert them to 32-bit format.
440 Elf64_Phdr phdr64[NACL_MAX_PROGRAM_HEADERS];
442 if (ehdr.ehdr64.e_phentsize != sizeof(Elf64_Phdr)) {
443 *err_code = LOAD_BAD_PHENTSIZE;
444 NaClLog(2, "bad prog headers size\n");
445 NaClLog(2, " ehdr64.e_phentsize = 0x%"NACL_PRIxElf_Half"\n",
446 ehdr.ehdr64.e_phentsize);
447 NaClLog(2, " sizeof(Elf64_Phdr) = 0x%"NACL_PRIxS"\n",
453 * We know the multiplication won't overflow since we rejected
454 * e_phnum values larger than the small constant NACL_MAX_PROGRAM_HEADERS.
456 read_ret = (*NACL_VTBL(NaClDesc, ndp)->
459 image.ehdr.e_phnum * sizeof phdr64[0],
460 (nacl_off64_t) image.ehdr.e_phoff);
461 if (NaClSSizeIsNegErrno(&read_ret) ||
462 (size_t) read_ret != image.ehdr.e_phnum * sizeof phdr64[0]) {
463 *err_code = LOAD_READ_ERROR;
464 NaClLog(2, "cannot load tp prog headers\n");
468 for (cur_ph = 0; cur_ph < image.ehdr.e_phnum; ++cur_ph) {
469 if (phdr64[cur_ph].p_offset > 0xffffffffU ||
470 phdr64[cur_ph].p_vaddr > 0xffffffffU ||
471 phdr64[cur_ph].p_paddr > 0xffffffffU ||
472 phdr64[cur_ph].p_filesz > 0xffffffffU ||
473 phdr64[cur_ph].p_memsz > 0xffffffffU ||
474 phdr64[cur_ph].p_align > 0xffffffffU) {
475 *err_code = LOAD_PHDR_OVERFLOW;
476 NaClLog(2, "ELFCLASS64 program header fields overflow 32 bits\n");
479 image.phdrs[cur_ph].p_type = phdr64[cur_ph].p_type;
480 image.phdrs[cur_ph].p_offset = (Elf32_Off) phdr64[cur_ph].p_offset;
481 image.phdrs[cur_ph].p_vaddr = (Elf32_Addr) phdr64[cur_ph].p_vaddr;
482 image.phdrs[cur_ph].p_paddr = (Elf32_Addr) phdr64[cur_ph].p_paddr;
483 image.phdrs[cur_ph].p_filesz = (Elf32_Word) phdr64[cur_ph].p_filesz;
484 image.phdrs[cur_ph].p_memsz = (Elf32_Word) phdr64[cur_ph].p_memsz;
485 image.phdrs[cur_ph].p_flags = phdr64[cur_ph].p_flags;
486 image.phdrs[cur_ph].p_align = (Elf32_Word) phdr64[cur_ph].p_align;
491 if (image.ehdr.e_phentsize != sizeof image.phdrs[0]) {
492 *err_code = LOAD_BAD_PHENTSIZE;
493 NaClLog(2, "bad prog headers size\n");
494 NaClLog(2, " image.ehdr.e_phentsize = 0x%"NACL_PRIxElf_Half"\n",
495 image.ehdr.e_phentsize);
496 NaClLog(2, " sizeof image.phdrs[0] = 0x%"NACL_PRIxS"\n",
497 sizeof image.phdrs[0]);
501 read_ret = (*NACL_VTBL(NaClDesc, ndp)->
504 image.ehdr.e_phnum * sizeof image.phdrs[0],
505 (nacl_off64_t) image.ehdr.e_phoff);
506 if (NaClSSizeIsNegErrno(&read_ret) ||
507 (size_t) read_ret != image.ehdr.e_phnum * sizeof image.phdrs[0]) {
508 *err_code = LOAD_READ_ERROR;
509 NaClLog(2, "cannot load tp prog headers\n");
514 NaClLog(2, "=================================================\n");
515 NaClLog(2, "Elf Program headers\n");
516 NaClLog(2, "==================================================\n");
517 for (cur_ph = 0; cur_ph < image.ehdr.e_phnum; ++cur_ph) {
518 NaClDumpElfProgramHeader(2, &image.phdrs[cur_ph]);
521 /* we delay allocating till the end to avoid cleanup code */
522 result = malloc(sizeof image);
524 *err_code = LOAD_NO_MEMORY;
525 NaClLog(LOG_FATAL, "no enough memory for image meta data\n");
528 memcpy(result, &image, sizeof image);
/*
 * Attempt to map into the NaClApp object nap from the NaCl descriptor
 * ndp an ELF segment of type p_flags that start at file_offset for
 * segment_size bytes, to memory starting at paddr (system address).
 * If it is a code segment, we make a scratch mapping and check
 * validation in readonly_text mode -- if it succeeds, we map into the
 * target address; if it fails, we return failure so that pread-based
 * loading can proceed.  For rodata and data segments, less checking
 * is needed.  In the text and data case, the end of the segment may
 * not land on a NACL_MAP_PAGESIZE boundary; when this occurs, we will
 * map in all whole NACL_MAP_PAGESIZE chunks, and pread in the tail
 * partial chunk.
 *
 * Returns: LOAD_OK, LOAD_STATUS_UNKNOWN, other error codes.
 *
 * LOAD_OK -- if the segment has been fully handled
 * LOAD_STATUS_UNKNOWN -- if pread-based fallback is required
 * other error codes -- if a fatal error occurs, and the caller
 *   should propagate up
 *
 * See NaClSysMmapIntern in nacl_syscall_common.c for corresponding
 * mmap syscall where PROT_EXEC allows shared libraries to be mapped
 * into dynamic code space.
 */
557 static NaClErrorCode NaClElfFileMapSegment(struct NaClApp *nap,
558 struct NaClDesc *ndp,
561 Elf_Off segment_size,
564 size_t rounded_filesz; /* 64k rounded */
566 uintptr_t image_sys_addr;
567 NaClValidationStatus validator_status = NaClValidationFailed;
568 struct NaClValidationMetadata metadata;
569 int read_last_page_if_partial_allocation_page = 1;
571 struct NaClPerfCounter time_mmap_segment;
572 NaClPerfCounterCtor(&time_mmap_segment, "NaClElfFileMapSegment");
574 rounded_filesz = NaClRoundAllocPage(segment_size);
577 "NaClElfFileMapSegment: checking segment flags 0x%x"
578 " to determine map checks\n",
581 * Is this the text segment? If so, map into scratch memory and
582 * run validation (possibly cached result) with !stubout_mode,
583 * readonly_text. If validator says it's okay, map directly into
584 * target location with NACL_ABI_PROT_READ|_EXEC. If anything
585 * failed, fall back to PRead. NB: the assumption is that there
586 * is only one PT_LOAD with PF_R|PF_X segment; this assumption is
587 * enforced by phdr seen_seg checks above in
588 * NaClElfImageValidateProgramHeaders.
590 * After this function returns, we will be setting memory protection
591 * in NaClMemoryProtection, so the actual memory protection used is
594 * For rodata and data/bss, we mmap with NACL_ABI_PROT_READ or
595 * NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE as appropriate,
596 * without doing validation. There is no fallback to PRead, since
597 * we don't validate the contents.
602 "NaClElfFileMapSegment: text segment and"
603 " file is safe for mmap\n");
604 if (NACL_VTBL(NaClDesc, ndp)->typeTag != NACL_DESC_HOST_IO) {
605 NaClLog(4, "NaClElfFileMapSegment: not supported type, got %d\n",
606 NACL_VTBL(NaClDesc, ndp)->typeTag);
607 return LOAD_STATUS_UNKNOWN;
610 * Unlike the mmap case, we do not re-run validation to
611 * allow patching here; instead, we handle validation
612 * failure by going to the pread_fallback case. In the
613 * future, we should consider doing an in-place mapping and
614 * allowing HLT patch validation, which should be cheaper
615 * since those pages that do not require patching (hopefully
616 * majority) will remain file-backed and not require swap
617 * space, even if we had to fault in every page.
619 NaClLog(1, "NaClElfFileMapSegment: mapping for validation\n");
620 NaClPerfCounterMark(&time_mmap_segment, "PreMap");
621 NaClPerfCounterIntervalLast(&time_mmap_segment);
622 image_sys_addr = (*NACL_VTBL(NaClDesc, ndp)->
624 NaClDescEffectorTrustedMem(),
628 NACL_ABI_MAP_PRIVATE,
630 NaClPerfCounterMark(&time_mmap_segment, "MapForValidate");
631 NaClPerfCounterIntervalLast(&time_mmap_segment);
632 if (NaClPtrIsNegErrno(&image_sys_addr)) {
634 "NaClElfFileMapSegment: Could not make scratch mapping,"
635 " falling back to reading\n");
636 return LOAD_STATUS_UNKNOWN;
638 /* ask validator / validation cache */
639 NaClMetadataFromNaClDescCtor(&metadata, ndp);
640 CHECK(segment_size == nap->static_text_end - NACL_TRAMPOLINE_END);
641 validator_status = NACL_FI_VAL(
642 "ELF_LOAD_FORCE_VALIDATION_STATUS",
643 enum NaClValidationStatus,
646 (uint8_t *) image_sys_addr,
647 segment_size, /* actual size */
648 0, /* stubout_mode: no */
649 1, /* readonly_text: yes */
652 nap->validation_cache));
653 NaClPerfCounterMark(&time_mmap_segment, "ValidateMapped");
654 NaClPerfCounterIntervalLast(&time_mmap_segment);
655 NaClLog(3, "NaClElfFileMapSegment: validator_status %d\n",
657 NaClMetadataDtor(&metadata);
659 * Remove scratch mapping, then map directly into untrusted
660 * address space or pread.
662 NaClDescUnmapUnsafe(ndp, (void *) image_sys_addr,
664 NACL_MAKE_MEM_UNDEFINED((void *) paddr, rounded_filesz);
666 if (NaClValidationSucceeded != validator_status) {
668 ("NaClElfFileMapSegment: readonly_text validation for mmap"
669 " failed. Will retry validation allowing HALT stubbing out"
670 " of unsupported instruction extensions.\n"));
671 return LOAD_STATUS_UNKNOWN;
674 NaClLog(1, "NaClElfFileMapSegment: mapping into code space\n");
676 * Windows appears to not allow RWX mappings. This interferes
677 * with HALT_SLED and having to HALT pad the last page. We
678 * allow partial code pages, so
679 * read_last_page_if_partial_allocation_page will ensure that
680 * the last page is writable, so we will be able to write HALT
681 * instructions as needed.
683 mmap_prot = NACL_ABI_PROT_READ | NACL_ABI_PROT_EXEC;
685 * NB: the log string is used by tests/mmap_main_nexe/nacl.scons
686 * and must be logged at a level that is less than or equal to
687 * the requested verbosity level there.
689 NaClLog(1, "NaClElfFileMapSegment: EXERCISING MMAP LOAD PATH\n");
690 nap->main_exe_prevalidated = 1;
694 /* read-write (initialized data) */
695 mmap_prot = NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE;
697 * NB: the partial page processing will result in zeros
698 * following the initialized data, so that the BSS will be zero.
699 * On a typical system, this page is mapped in and the BSS
700 * region is memset to zero, which means that this partial page
701 * is faulted in. Rather than saving a syscall (pread) and
702 * faulting it in, we just use the same code path as for code,
703 * which is (slightly) simpler.
709 mmap_prot = NACL_ABI_PROT_READ;
711 * For rodata, we allow mapping in "garbage" past a partial
712 * page; this potentially eliminates a disk I/O operation
713 * (if data section has no partial page), possibly delaying
714 * disk spin-up if the code was in the validation cache.
715 * And it saves another 64kB of swap.
717 read_last_page_if_partial_allocation_page = 0;
721 NaClLog(LOG_FATAL, "NaClElfFileMapSegment: unexpected p_flags %d\n",
724 if (rounded_filesz != segment_size &&
725 read_last_page_if_partial_allocation_page) {
726 uintptr_t tail_offset = rounded_filesz - NACL_MAP_PAGESIZE;
727 size_t tail_size = segment_size - tail_offset;
728 NaClLog(4, "NaClElfFileMapSegment: pread tail\n");
729 read_ret = (*NACL_VTBL(NaClDesc, ndp)->
731 (void *) (paddr + tail_offset),
733 (nacl_off64_t) (file_offset + tail_offset));
734 NaClPerfCounterMark(&time_mmap_segment, "PRead tail");
735 NaClPerfCounterIntervalLast(&time_mmap_segment);
736 if (NaClSSizeIsNegErrno(&read_ret) || (size_t) read_ret != tail_size) {
738 "NaClElfFileMapSegment: pread load of page tail failed\n");
739 return LOAD_SEGMENT_BAD_PARAM;
741 rounded_filesz -= NACL_MAP_PAGESIZE;
744 if (rounded_filesz == 0) {
746 "NaClElfFileMapSegment: no pages to map, probably because"
747 " the segment was a partial page, so it was processed by"
751 "NaClElfFileMapSegment: mapping %"NACL_PRIuS" (0x%"
752 NACL_PRIxS") bytes to"
753 " address 0x%"NACL_PRIxPTR", position %"
754 NACL_PRIdElf_Off" (0x%"NACL_PRIxElf_Off")\n",
755 rounded_filesz, rounded_filesz,
757 file_offset, file_offset);
758 image_sys_addr = (*NACL_VTBL(NaClDesc, ndp)->
764 NACL_ABI_MAP_PRIVATE | NACL_ABI_MAP_FIXED,
766 NaClPerfCounterMark(&time_mmap_segment, "MapFinal");
767 NaClPerfCounterIntervalLast(&time_mmap_segment);
768 if (image_sys_addr != paddr) {
770 ("NaClElfFileMapSegment: map to 0x%"NACL_PRIxPTR" (prot %x) "
771 "failed: got 0x%"NACL_PRIxPTR"\n"),
772 paddr, mmap_prot, image_sys_addr);
774 /* Tell Valgrind that we've mapped a segment of nacl_file. */
775 NaClFileMappingForValgrind(paddr, rounded_filesz, file_offset);
780 NaClErrorCode NaClElfImageLoad(struct NaClElfImage *image,
781 struct NaClDesc *ndp,
782 struct NaClApp *nap) {
790 for (segnum = 0; segnum < image->ehdr.e_phnum; ++segnum) {
791 const Elf_Phdr *php = &image->phdrs[segnum];
792 Elf_Off offset = (Elf_Off) NaClTruncAllocPage(php->p_offset);
793 Elf_Off filesz = php->p_offset + php->p_filesz - offset;
795 /* did we decide that we will load this segment earlier? */
796 if (!image->loadable[segnum]) {
800 NaClLog(2, "loading segment %d\n", segnum);
802 if (0 == php->p_filesz) {
803 NaClLog(4, "zero-sized segment. ignoring...\n");
807 end_vaddr = php->p_vaddr + php->p_filesz;
808 /* integer overflow? */
809 if (end_vaddr < php->p_vaddr) {
810 NaClLog(LOG_FATAL, "parameter error should have been detected already\n");
813 * is the end virtual address within the NaCl application's
814 * address space? if it is, it implies that the start virtual
817 if (end_vaddr >= ((uintptr_t) 1U << nap->addr_bits)) {
818 NaClLog(LOG_FATAL, "parameter error should have been detected already\n");
821 vaddr = NaClTruncAllocPage(php->p_vaddr);
822 paddr = NaClUserToSysAddr(nap, vaddr);
823 CHECK(kNaClBadAddress != paddr);
826 * Check NaClDescIsSafeForMmap(ndp) to see if it might be okay to
829 NaClLog(4, "NaClElfImageLoad: checking descriptor mmap safety\n");
830 safe_for_mmap = NaClDescIsSafeForMmap(ndp);
832 NaClLog(4, "NaClElfImageLoad: safe-for-mmap\n");
835 if (!safe_for_mmap &&
836 NACL_FI("ELF_LOAD_BYPASS_DESCRIPTOR_SAFETY_CHECK", 0, 1)) {
837 NaClLog(LOG_WARNING, "WARNING: BYPASSING DESCRIPTOR SAFETY CHECK\n");
841 NaClErrorCode map_status;
842 NaClLog(4, "NaClElfImageLoad: safe-for-mmap\n");
843 map_status = NaClElfFileMapSegment(nap, ndp, php->p_flags,
844 offset, filesz, vaddr, paddr);
846 * NB: -Werror=switch-enum forces us to not use a switch.
848 if (LOAD_OK == map_status) {
849 /* Segment has been handled -- proceed to next segment */
851 } else if (LOAD_STATUS_UNKNOWN != map_status) {
853 * A real error! Return it so that this can be reported to
854 * the embedding code (via start_module status).
858 /* Fall through: pread-based fallback requested */
861 "PReading %"NACL_PRIdElf_Xword" (0x%"NACL_PRIxElf_Xword") bytes to"
862 " address 0x%"NACL_PRIxPTR", position %"
863 NACL_PRIdElf_Off" (0x%"NACL_PRIxElf_Off")\n",
869 * Tell valgrind that this memory is accessible and undefined. For more
871 * http://code.google.com/p/nativeclient/wiki/ValgrindMemcheck#Implementation_details
873 NACL_MAKE_MEM_UNDEFINED((void *) paddr, filesz);
875 read_ret = (*NACL_VTBL(NaClDesc, ndp)->
876 PRead)(ndp, (void *) paddr, filesz, (nacl_off64_t) offset);
877 if (NaClSSizeIsNegErrno(&read_ret) || (size_t) read_ret != filesz) {
878 NaClLog(LOG_ERROR, "load failure segment %d", segnum);
879 return LOAD_SEGMENT_BAD_PARAM;
881 /* region from p_filesz to p_memsz should already be zero filled */
883 /* Tell Valgrind that we've mapped a segment of nacl_file. */
884 NaClFileMappingForValgrind(paddr, filesz, offset);
891 NaClErrorCode NaClElfImageLoadDynamically(
892 struct NaClElfImage *image,
894 struct NaClDesc *ndp,
895 struct NaClValidationMetadata *metadata) {
898 for (segnum = 0; segnum < image->ehdr.e_phnum; ++segnum) {
899 const Elf_Phdr *php = &image->phdrs[segnum];
900 Elf_Addr vaddr = php->p_vaddr & ~(NACL_MAP_PAGESIZE - 1);
901 Elf_Off offset = php->p_offset & ~(NACL_MAP_PAGESIZE - 1);
902 Elf_Off filesz = php->p_offset + php->p_filesz - offset;
903 Elf_Off memsz = php->p_offset + php->p_memsz - offset;
907 * We check for PT_LOAD directly rather than using the "loadable"
908 * array because we are not using NaClElfImageValidateProgramHeaders()
909 * to fill out the "loadable" array for this ELF object. This ELF
910 * object does not have to fit such strict constraints (such as
911 * having code at 0x20000), and safety checks are applied by
912 * NaClTextDyncodeCreate() and NaClSysMmapIntern().
914 if (PT_LOAD != php->p_type) {
918 if (0 != (php->p_flags & PF_X)) {
919 /* Load code segment. */
921 * We make a copy of the code. This is not ideal given that this
922 * code path is used only for loading the IRT, and we could assume
923 * that the contents of the irt.nexe file will not change underneath
924 * us. We should be able to mmap() the IRT's code segment instead of
926 * TODO(mseaborn): Reduce the amount of copying here.
928 char *code_copy = malloc(filesz);
929 if (NULL == code_copy) {
930 NaClLog(LOG_ERROR, "NaClElfImageLoadDynamically: malloc failed\n");
931 return LOAD_NO_MEMORY;
933 read_ret = (*NACL_VTBL(NaClDesc, ndp)->
934 PRead)(ndp, code_copy, filesz, (nacl_off64_t) offset);
935 if (NaClSSizeIsNegErrno(&read_ret) ||
936 (size_t) read_ret != filesz) {
938 NaClLog(LOG_ERROR, "NaClElfImageLoadDynamically: "
939 "failed to read code segment\n");
940 return LOAD_READ_ERROR;
942 if (NULL != metadata) {
943 metadata->code_offset = offset;
945 result = NaClTextDyncodeCreate(nap, (uint32_t) vaddr,
946 code_copy, (uint32_t) filesz, metadata);
949 NaClLog(LOG_ERROR, "NaClElfImageLoadDynamically: "
950 "failed to load code segment\n");
951 return LOAD_UNLOADABLE;
954 /* Load data segment. */
955 void *paddr = (void *) NaClUserToSys(nap, vaddr);
956 size_t mapping_size = NaClRoundAllocPage(memsz);
958 * Note that we do not used NACL_ABI_MAP_FIXED because we do not
959 * want to silently overwrite any existing mappings, such as the
960 * user app's data segment or the stack. We detect overmapping
961 * when mmap chooses not to use the preferred address we supply.
962 * (Ideally mmap would provide a MAP_EXCL option for this
965 result = NaClSysMmapIntern(
966 nap, (void *) (uintptr_t) vaddr, mapping_size,
967 NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE,
968 NACL_ABI_MAP_ANONYMOUS | NACL_ABI_MAP_PRIVATE,
970 if ((int32_t) vaddr != result) {
971 NaClLog(LOG_ERROR, "NaClElfImageLoadDynamically: "
972 "failed to map data segment\n");
973 return LOAD_UNLOADABLE;
975 read_ret = (*NACL_VTBL(NaClDesc, ndp)->
976 PRead)(ndp, paddr, filesz, (nacl_off64_t) offset);
977 if (NaClSSizeIsNegErrno(&read_ret) ||
978 (size_t) read_ret != filesz) {
979 NaClLog(LOG_ERROR, "NaClElfImageLoadDynamically: "
980 "failed to read data segment\n");
981 return LOAD_READ_ERROR;
984 * Note that we do not need to zero the BSS (the region from
985 * p_filesz to p_memsz) because it should already be zero
986 * filled. This would not be the case if we were mapping the
987 * data segment from the file.
990 if (0 == (php->p_flags & PF_W)) {
991 /* Handle read-only data segment. */
992 int rc = NaClMprotect(paddr, mapping_size, NACL_ABI_PROT_READ);
994 NaClLog(LOG_ERROR, "NaClElfImageLoadDynamically: "
995 "failed to mprotect read-only data segment\n");
996 return LOAD_MPROTECT_FAIL;
999 NaClVmmapAddWithOverwrite(&nap->mem_map,
1000 vaddr >> NACL_PAGESHIFT,
1001 mapping_size >> NACL_PAGESHIFT,
1003 NACL_ABI_MAP_PRIVATE,
1013 void NaClElfImageDelete(struct NaClElfImage *image) {
1018 uintptr_t NaClElfImageGetEntryPoint(struct NaClElfImage *image) {
1019 return image->ehdr.e_entry;